This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.getParseException. If you are wondering what JobHistoryParser.getParseException does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.
Below are 5 code examples of the JobHistoryParser.getParseException method, sorted by popularity by default.
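The common thread in all of the examples below: JobHistoryParser.parse() returns whatever JobInfo it managed to read, and a parse error encountered along the way is reported separately through getParseException(). Callers therefore check getParseException() after parse() and decide whether to fail hard or keep the partial result. Here is a minimal sketch of that pattern; the class name ParseHistoryFile and the fail-hard policy are illustrative choices, not part of the Hadoop API:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;

public class ParseHistoryFile {

  /**
   * Parses a .jhist file and fails hard on any parse error. Note that
   * parse() can return a partially populated JobInfo even when the file is
   * truncated or corrupt; getParseException() exposes the error it hit.
   */
  public static JobInfo parseOrFail(FileSystem fs, Path historyFile)
      throws IOException {
    JobHistoryParser parser = new JobHistoryParser(fs, historyFile);
    JobInfo jobInfo = parser.parse();
    IOException parseException = parser.getParseException();
    if (parseException != null) {
      // Alternative policy: log and keep the partial jobInfo (see Example 2).
      throw new IOException("Could not parse " + historyFile, parseException);
    }
    return jobInfo;
  }
}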
Example 1: loadFullHistoryData
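This method loads a complete job history file. Note the two distinct failure paths: parse() itself can throw an IOException, and even after parse() returns, getParseException() is checked so that a partially parsed file is also treated as fatal.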
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the class the method depends on

protected synchronized void loadFullHistoryData(boolean loadTasks,
    Path historyFileAbsolute) throws IOException {
  LOG.info("Loading history file: [" + historyFileAbsolute + "]");
  if (this.jobInfo != null) {
    return;
  }

  if (historyFileAbsolute != null) {
    JobHistoryParser parser = null;
    try {
      parser = createJobHistoryParser(historyFileAbsolute);
      this.jobInfo = parser.parse();
    } catch (IOException e) {
      throw new YarnRuntimeException("Could not load history file "
          + historyFileAbsolute, e);
    }
    // parse() may have stopped early; surface any recorded parse error
    IOException parseException = parser.getParseException();
    if (parseException != null) {
      throw new YarnRuntimeException(
          "Could not parse history file " + historyFileAbsolute,
          parseException);
    }
  } else {
    throw new IOException("History file not found");
  }

  if (loadTasks) {
    loadAllTasks();
    LOG.info("TaskInfo loaded");
  }
}
Example 2: parsePreviousJobHistory
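This method recovers completed tasks from the previous application attempt's history file. Here a non-null getParseException() is only logged, and recovery continues with the events that were read before the error, ignoring incomplete ones.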
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the class the method depends on

private void parsePreviousJobHistory() throws IOException {
  FSDataInputStream in = getPreviousJobHistoryStream(getConfig(),
      appAttemptID);
  JobHistoryParser parser = new JobHistoryParser(in);
  JobInfo jobInfo = parser.parse();
  // Recovery tolerates a truncated history file: log the parse error and
  // continue with the events that were read successfully.
  Exception parseException = parser.getParseException();
  if (parseException != null) {
    LOG.info("Got an error parsing job-history file" +
        ", ignoring incomplete events.", parseException);
  }
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator =
          taskInfo.getAllTaskAttempts().entrySet().iterator();
      while (taskAttemptIterator.hasNext()) {
        Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry =
            taskAttemptIterator.next();
        if (!jobInfo.getAllCompletedTaskAttempts()
            .containsKey(currentEntry.getKey())) {
          taskAttemptIterator.remove();
        }
      }
      completedTasksFromPreviousRun
          .put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
      LOG.info("Read from history task "
          + TypeConverter.toYarn(taskInfo.getTaskId()));
    }
  }
  LOG.info("Read completed tasks from history "
      + completedTasksFromPreviousRun.size());
  recoveredJobStartTime = jobInfo.getLaunchTime();

  // recover AMInfos
  List<JobHistoryParser.AMInfo> jhAmInfoList = jobInfo.getAMInfos();
  if (jhAmInfoList != null) {
    for (JobHistoryParser.AMInfo jhAmInfo : jhAmInfoList) {
      AMInfo amInfo = MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
          jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
          jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
          jhAmInfo.getNodeManagerHttpPort());
      amInfos.add(amInfo);
    }
  }
}
Example 3: fetchData
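This fetcher builds an application-data object from a finished job's configuration and history files. An oversized history file short-circuits parsing entirely, while a non-null getParseException() aborts with a RuntimeException.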
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the class the method depends on

@Override
public MapReduceApplicationData fetchData(AnalyticJob job) throws IOException {
  DataFiles files = getHistoryFiles(job);
  String confFile = files.getJobConfPath();
  String histFile = files.getJobHistPath();
  String appId = job.getAppId();
  String jobId = Utils.getJobIdFromApplicationId(appId);

  MapReduceApplicationData jobData = new MapReduceApplicationData();
  jobData.setAppId(appId).setJobId(jobId);

  // Fetch job config
  Configuration jobConf = new Configuration(false);
  jobConf.addResource(_fs.open(new Path(confFile)), confFile);
  Properties jobConfProperties = new Properties();
  for (Map.Entry<String, String> entry : jobConf) {
    jobConfProperties.put(entry.getKey(), entry.getValue());
  }
  jobData.setJobConf(jobConfProperties);

  // Check whether the job history file is too large and parsing should be throttled
  if (_fs.getFileStatus(new Path(histFile)).getLen() > _maxLogSizeInMB * FileUtils.ONE_MB) {
    String errMsg = "The history log of MapReduce application: " + appId
        + " exceeds the size limit of " + _maxLogSizeInMB + " MB; parsing is throttled.";
    logger.warn(errMsg);
    jobData.setDiagnosticInfo(errMsg);
    jobData.setSucceeded(false); // set succeeded to false to skip heuristic analysis
    return jobData;
  }

  // Analyze job history file
  JobHistoryParser parser = new JobHistoryParser(_fs, histFile);
  JobHistoryParser.JobInfo jobInfo = parser.parse();
  IOException parseException = parser.getParseException();
  if (parseException != null) {
    throw new RuntimeException("Could not parse history file " + histFile, parseException);
  }

  jobData.setSubmitTime(jobInfo.getSubmitTime());
  jobData.setStartTime(jobInfo.getLaunchTime());
  jobData.setFinishTime(jobInfo.getFinishTime());

  String state = jobInfo.getJobStatus();
  if (state.equals("SUCCEEDED")) {
    jobData.setSucceeded(true);
  } else if (state.equals("FAILED")) {
    jobData.setSucceeded(false);
    jobData.setDiagnosticInfo(jobInfo.getErrorInfo());
  } else {
    throw new RuntimeException("Job neither succeeded nor failed; cannot process it.");
  }

  // Fetch job counters
  MapReduceCounterData jobCounter = getCounterData(jobInfo.getTotalCounters());

  // Fetch task data
  Map<TaskID, JobHistoryParser.TaskInfo> allTasks = jobInfo.getAllTasks();
  List<JobHistoryParser.TaskInfo> mapperInfoList = new ArrayList<JobHistoryParser.TaskInfo>();
  List<JobHistoryParser.TaskInfo> reducerInfoList = new ArrayList<JobHistoryParser.TaskInfo>();
  for (JobHistoryParser.TaskInfo taskInfo : allTasks.values()) {
    if (taskInfo.getTaskType() == TaskType.MAP) {
      mapperInfoList.add(taskInfo);
    } else {
      reducerInfoList.add(taskInfo);
    }
  }

  if (jobInfo.getTotalMaps() > MAX_SAMPLE_SIZE) {
    logger.debug(jobId + " total mappers: " + mapperInfoList.size());
  }
  if (jobInfo.getTotalReduces() > MAX_SAMPLE_SIZE) {
    logger.debug(jobId + " total reducers: " + reducerInfoList.size());
  }

  MapReduceTaskData[] mapperList = getTaskData(jobId, mapperInfoList);
  MapReduceTaskData[] reducerList = getTaskData(jobId, reducerInfoList);
  jobData.setCounters(jobCounter).setMapperData(mapperList).setReducerData(reducerList);

  return jobData;
}
Example 4: testHistoryParsingForFailedAttempts
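A unit test that runs a job with a failed task attempt, re-parses the generated history file, asserts that getParseException() returns null, and then verifies rack names and the number of failed attempts.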
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the class the method depends on

@Test(timeout = 30000)
public void testHistoryParsingForFailedAttempts() throws Exception {
  LOG.info("STARTING testHistoryParsingForFailedAttempts");
  try {
    Configuration conf = new Configuration();
    conf.setClass(
        CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        MyResolver.class, DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistoryWithFailedAttempt(2, 1, true, this
        .getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    app.waitForState(job, JobState.SUCCEEDED);

    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);

    String jobhistoryDir = JobHistoryUtils
        .getHistoryIntermediateDoneDirForUser(conf);
    JobHistory jobHistory = new JobHistory();
    jobHistory.init(conf);

    JobIndexInfo jobIndexInfo = jobHistory.getJobFileInfo(jobId)
        .getJobIndexInfo();
    String jobhistoryFileName = FileNameIndexUtils
        .getDoneFileName(jobIndexInfo);

    Path historyFilePath = new Path(jobhistoryDir, jobhistoryFileName);
    FSDataInputStream in = null;
    FileContext fc = null;
    try {
      fc = FileContext.getFileContext(conf);
      in = fc.open(fc.makeQualified(historyFilePath));
    } catch (IOException ioe) {
      LOG.info("Can not open history file: " + historyFilePath, ioe);
      throw (new Exception("Can not open History File"));
    }

    JobHistoryParser parser = new JobHistoryParser(in);
    JobInfo jobInfo = parser.parse();
    Exception parseException = parser.getParseException();
    Assert.assertNull("Caught an unexpected exception " + parseException,
        parseException);
    int noOffailedAttempts = 0;
    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
            TypeConverter.fromYarn((taskAttempt.getID())));
        // Verify rack-name for all task attempts
        Assert.assertEquals("rack-name is incorrect",
            taskAttemptInfo.getRackname(), RACK_NAME);
        if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
          noOffailedAttempts++;
        }
      }
    }
    Assert.assertEquals("Number of failed attempts doesn't match.", 2,
        noOffailedAttempts);
  } finally {
    LOG.info("FINISHED testHistoryParsingForFailedAttempts");
  }
}
Example 5: testCountersForFailedTask
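A unit test for the counters of a failed job: after parsing the history file it asserts that getParseException() returns null, then checks that every completed task report carries non-null counters.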
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the class the method depends on

@Test(timeout = 60000)
public void testCountersForFailedTask() throws Exception {
  LOG.info("STARTING testCountersForFailedTask");
  try {
    Configuration conf = new Configuration();
    conf.setClass(
        CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        MyResolver.class, DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistoryWithFailedTask(2, 1, true, this
        .getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    app.waitForState(job, JobState.FAILED);

    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);

    String jobhistoryDir = JobHistoryUtils
        .getHistoryIntermediateDoneDirForUser(conf);
    JobHistory jobHistory = new JobHistory();
    jobHistory.init(conf);

    JobIndexInfo jobIndexInfo = jobHistory.getJobFileInfo(jobId)
        .getJobIndexInfo();
    String jobhistoryFileName = FileNameIndexUtils
        .getDoneFileName(jobIndexInfo);

    Path historyFilePath = new Path(jobhistoryDir, jobhistoryFileName);
    FSDataInputStream in = null;
    FileContext fc = null;
    try {
      fc = FileContext.getFileContext(conf);
      in = fc.open(fc.makeQualified(historyFilePath));
    } catch (IOException ioe) {
      LOG.info("Can not open history file: " + historyFilePath, ioe);
      throw (new Exception("Can not open History File"));
    }

    JobHistoryParser parser = new JobHistoryParser(in);
    JobInfo jobInfo = parser.parse();
    Exception parseException = parser.getParseException();
    Assert.assertNull("Caught an unexpected exception " + parseException,
        parseException);
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      CompletedTask ct = new CompletedTask(yarnTaskID, entry.getValue());
      Assert.assertNotNull("completed task report has null counters", ct
          .getReport().getCounters());
    }
  } finally {
    LOG.info("FINISHED testCountersForFailedTask");
  }
}