

Java JobHistoryParser.parse Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.parse. If you are wondering what JobHistoryParser.parse does, how to call it, or where it is used in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.


The sections below present 15 code examples of the JobHistoryParser.parse method, drawn from open-source projects and ordered by popularity by default.
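Before turning to the individual examples, the snippet below is a minimal, self-contained sketch of the call sequence most of them share: build a JobHistoryParser from a FileSystem and the Path of a .jhist file, call parse() to obtain a JobHistoryParser.JobInfo, then check getParseException() for errors that parse() records rather than throws. The class name JHistQuickStart and the command-line argument are illustrative placeholders, not part of Hadoop.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;

public class JHistQuickStart {
  public static void main(String[] args) throws IOException {
    // Path to a completed job's .jhist file, passed on the command line (placeholder).
    Path histPath = new Path(args[0]);
    FileSystem fs = FileSystem.getLocal(new Configuration());

    // Parse the entire history file into an in-memory JobInfo summary.
    JobHistoryParser parser = new JobHistoryParser(fs, histPath);
    JobInfo jobInfo = parser.parse();

    // parse() tolerates some mid-file errors; surface them explicitly,
    // as several of the examples below do.
    IOException parseException = parser.getParseException();
    if (parseException != null) {
      throw new IOException("Incomplete history data in " + histPath, parseException);
    }

    System.out.println(jobInfo.getJobId() + " " + jobInfo.getJobStatus()
        + " maps=" + jobInfo.getTotalMaps() + " reduces=" + jobInfo.getTotalReduces());
  }
}

Note how the examples differ in what they do with getParseException(): Examples 6 and 12 treat a recorded parse error as fatal and rethrow it, while Example 9 only logs it and continues with the partially recovered task data.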

Example 1: testFailedJobHistoryWithoutDiagnostics

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
@Test
public void testFailedJobHistoryWithoutDiagnostics() throws Exception {
  final Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_1393307629410_0001-1393307687476-user-Sleep+job-1393307723835-0-0-FAILED-default-1393307693920.jhist")
      .getFile());
  final FileSystem lfs = FileSystem.getLocal(new Configuration());
  final FSDataInputStream fsdis = lfs.open(histPath);
  try {
    JobHistoryParser parser = new JobHistoryParser(fsdis);
    JobInfo info = parser.parse();
    assertEquals("History parsed jobId incorrectly",
        info.getJobId(), JobID.forName("job_1393307629410_0001") );
    assertEquals("Default diagnostics incorrect ", "", info.getErrorInfo());
  } finally {
    fsdis.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestJobHistoryParsing.java

Example 2: getSimulatedJobHistory

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
/**
 * Get the simulated job history of a job.
 * @param simulatedJobID - simulated job id.
 * @return - simulated job information.
 * @throws IOException - if an I/O error occurs.
 */
public JobHistoryParser.JobInfo getSimulatedJobHistory(JobID simulatedJobID) 
    throws IOException {
  FileSystem fs = null;
  try {
    String historyFilePath = jtClient.getProxy().
        getJobHistoryLocationForRetiredJob(simulatedJobID);
    Path jhpath = new Path(historyFilePath);
    fs = jhpath.getFileSystem(conf);
    JobHistoryParser jhparser = new JobHistoryParser(fs, jhpath);
    JobHistoryParser.JobInfo jhInfo = jhparser.parse();
    return jhInfo;

  } finally {
    fs.close();
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 23, Source: GridmixJobVerification.java
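One caveat with Example 2: if the history-location lookup or jhpath.getFileSystem(conf) throws, fs is still null when the finally block runs, and fs.close() masks the original error with a NullPointerException. Below is a resource-safe sketch of the same parsing step, reduced to a self-contained helper; the class name SimulatedJobHistoryReader and the plain path argument replace the Gridmix-specific jtClient lookup purely for illustration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;

public final class SimulatedJobHistoryReader {

  /** Parses the given .jhist file and returns its JobInfo summary. */
  public static JobHistoryParser.JobInfo readJobHistory(String historyFilePath,
      Configuration conf) throws IOException {
    Path jhpath = new Path(historyFilePath);
    // Resolve the FileSystem before the try block, so the finally never sees a null fs.
    FileSystem fs = jhpath.getFileSystem(conf);
    try {
      JobHistoryParser jhparser = new JobHistoryParser(fs, jhpath);
      return jhparser.parse();
    } finally {
      fs.close();
    }
  }
}

Note also that a FileSystem obtained this way may be a shared, cached instance; callers that reuse it elsewhere may prefer not to close it here.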

Example 3: testTaskAttemptUnsuccessfulCompletionWithoutCounters203

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
/**
 * Test compatibility of JobHistoryParser with 2.0.3-alpha history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters203() throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_2.0.3-alpha-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " "
      + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestJobHistoryParsing.java

Example 4: testTaskAttemptUnsuccessfulCompletionWithoutCounters240

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
/**
 * Test compatibility of JobHistoryParser with 2.4.0 history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters240() throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_2.4.0-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " "
      + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestJobHistoryParsing.java

Example 5: testTaskAttemptUnsuccessfulCompletionWithoutCounters0239

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
/**
 * Test compatibility of JobHistoryParser with 0.23.9 history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters0239() throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_0.23.9-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " "
      + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestJobHistoryParsing.java

Example 6: loadFullHistoryData

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
protected synchronized void loadFullHistoryData(boolean loadTasks,
    Path historyFileAbsolute) throws IOException {
  LOG.info("Loading history file: [" + historyFileAbsolute + "]");
  if (this.jobInfo != null) {
    return;
  }
  
  if (historyFileAbsolute != null) {
    JobHistoryParser parser = null;
    try {
      parser = createJobHistoryParser(historyFileAbsolute);
      this.jobInfo = parser.parse();
    } catch (IOException e) {
      throw new YarnRuntimeException("Could not load history file "
          + historyFileAbsolute, e);
    }
    IOException parseException = parser.getParseException(); 
    if (parseException != null) {
      throw new YarnRuntimeException(
          "Could not parse history file " + historyFileAbsolute, 
          parseException);
    }
  } else {
    throw new IOException("History file not found");
  }
  if (loadTasks) {
    loadAllTasks();
    LOG.info("TaskInfo loaded");
  }    
}
 
Developer: hopshadoop, Project: hops, Lines: 31, Source: CompletedJob.java

Example 7: getJobInfo

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
/**
 * Read a job-history log file and construct the corresponding {@link JobInfo}
 * . Also cache the {@link JobInfo} for quick serving further requests.
 * 
 * @param logFile      the job history log file
 * @param fs           job tracker file system
 * @param jobTracker   the job tracker
 * @return JobInfo     job's basic information
 * @throws IOException
 */
static JobInfo getJobInfo(Path logFile, FileSystem fs,
    JobTracker jobTracker) throws IOException {
  String jobid =
      JobHistory.getJobIDFromHistoryFilePath(logFile).toString();
  JobInfo jobInfo = null;
  synchronized(jobHistoryCache) {
    jobInfo = jobHistoryCache.remove(jobid);
    if (jobInfo == null) {
      JobHistoryParser parser = new JobHistoryParser(fs, logFile);
      jobInfo = parser.parse();
      LOG.info("Loading Job History file "+jobid + ".   Cache size is " +
          jobHistoryCache.size());
    }
    jobHistoryCache.put(jobid, jobInfo);
    int CACHE_SIZE =
        jobTracker.conf.getInt(JTConfig.JT_JOBHISTORY_CACHE_SIZE, 5);
    if (jobHistoryCache.size() > CACHE_SIZE) {
      Iterator<Map.Entry<String, JobInfo>> it = 
        jobHistoryCache.entrySet().iterator();
      String removeJobId = it.next().getKey();
      it.remove();
      LOG.info("Job History file removed form cache "+removeJobId);
    }
  }

  return jobInfo;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 38, Source: JSPUtil.java

Example 8: validateJobHistoryJobStatus

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
/**
 * Checks if the history file has expected job status
 * @param id job id
 * @param conf job conf
 */
private static void validateJobHistoryJobStatus(JobHistory jobHistory,
    JobID id, JobConf conf, String status) throws IOException  {

  // Get the history file name
  Path doneDir = jobHistory.getCompletedJobHistoryLocation();
  String logFileName = getDoneFile(jobHistory, conf, id, doneDir);
  
  // Framework history log file location
  Path logFile = new Path(doneDir, logFileName);
  FileSystem fileSys = logFile.getFileSystem(conf);
 
  // Check if the history file exists
  assertTrue("History file does not exist", fileSys.exists(logFile));

  // check history file permission
  assertTrue("History file permissions does not match", 
  fileSys.getFileStatus(logFile).getPermission().equals(
     new FsPermission(JobHistory.HISTORY_FILE_PERMISSION)));
  
  JobHistoryParser parser = new JobHistoryParser(fileSys, 
      logFile.toUri().getPath());
  JobHistoryParser.JobInfo jobInfo = parser.parse();
  

  assertTrue("Job Status read from job history file is not the expected" +
       " status", status.equals(jobInfo.getJobStatus()));
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 33, Source: TestJobHistory.java

Example 9: parsePreviousJobHistory

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
private void parsePreviousJobHistory() throws IOException {
  FSDataInputStream in = getPreviousJobHistoryStream(getConfig(),
      appAttemptID);
  JobHistoryParser parser = new JobHistoryParser(in);
  JobInfo jobInfo = parser.parse();
  Exception parseException = parser.getParseException();
  if (parseException != null) {
    LOG.info("Got an error parsing job-history file" +
        ", ignoring incomplete events.", parseException);
  }
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator =
          taskInfo.getAllTaskAttempts().entrySet().iterator();
      while (taskAttemptIterator.hasNext()) {
        Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry = taskAttemptIterator.next();
        if (!jobInfo.getAllCompletedTaskAttempts().containsKey(currentEntry.getKey())) {
          taskAttemptIterator.remove();
        }
      }
      completedTasksFromPreviousRun
          .put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
      LOG.info("Read from history task "
          + TypeConverter.toYarn(taskInfo.getTaskId()));
    }
  }
  LOG.info("Read completed tasks from history "
      + completedTasksFromPreviousRun.size());
  recoveredJobStartTime = jobInfo.getLaunchTime();

  // recover AMInfos
  List<JobHistoryParser.AMInfo> jhAmInfoList = jobInfo.getAMInfos();
  if (jhAmInfoList != null) {
    for (JobHistoryParser.AMInfo jhAmInfo : jhAmInfoList) {
      AMInfo amInfo = MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
          jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
          jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
          jhAmInfo.getNodeManagerHttpPort());
      amInfos.add(amInfo);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 45, Source: MRAppMaster.java

Example 10: testMultipleFailedTasks

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
@Test
public void testMultipleFailedTasks() throws Exception {
  JobHistoryParser parser =
      new JobHistoryParser(Mockito.mock(FSDataInputStream.class));
  EventReader reader = Mockito.mock(EventReader.class);
  final AtomicInteger numEventsRead = new AtomicInteger(0); // Hack!
  final org.apache.hadoop.mapreduce.TaskType taskType =
      org.apache.hadoop.mapreduce.TaskType.MAP;
  final TaskID[] tids = new TaskID[2];
  final JobID jid = new JobID("1", 1);
  tids[0] = new TaskID(jid, taskType, 0);
  tids[1] = new TaskID(jid, taskType, 1);
  Mockito.when(reader.getNextEvent()).thenAnswer(
      new Answer<HistoryEvent>() {
        public HistoryEvent answer(InvocationOnMock invocation)
            throws IOException {
          // send two task start and two task fail events for tasks 0 and 1
          int eventId = numEventsRead.getAndIncrement();
          TaskID tid = tids[eventId & 0x1];
          if (eventId < 2) {
            return new TaskStartedEvent(tid, 0, taskType, "");
          }
          if (eventId < 4) {
            TaskFailedEvent tfe = new TaskFailedEvent(tid, 0, taskType,
                "failed", "FAILED", null, new Counters());
            tfe.setDatum(tfe.getDatum());
            return tfe;
          }
          if (eventId < 5) {
            JobUnsuccessfulCompletionEvent juce =
                new JobUnsuccessfulCompletionEvent(jid, 100L, 2, 0,
                    "JOB_FAILED", Collections.singletonList(
                        "Task failed: " + tids[0].toString()));
            return juce;
          }
          return null;
        }
      });
  JobInfo info = parser.parse(reader);
  assertTrue("Task 0 not implicated",
      info.getErrorInfo().contains(tids[0].toString()));
}
 
Developer: naver, Project: hadoop, Lines: 43, Source: TestJobHistoryParsing.java

Example 11: parseHistoryFile

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
public JobInfo parseHistoryFile(Path path) throws IOException {
  LOG.info("parsing job history file " + path);
  JobHistoryParser parser = new JobHistoryParser(fs, path);
  return parser.parse();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 6, Source: JobHistoryFileParser.java

Example 12: fetchData

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
@Override
public MapReduceApplicationData fetchData(AnalyticJob job) throws IOException {
  DataFiles files = getHistoryFiles(job);
  String confFile = files.getJobConfPath();
  String histFile = files.getJobHistPath();
  String appId = job.getAppId();
  String jobId = Utils.getJobIdFromApplicationId(appId);

  MapReduceApplicationData jobData = new MapReduceApplicationData();
  jobData.setAppId(appId).setJobId(jobId);

  // Fetch job config
  Configuration jobConf = new Configuration(false);
  jobConf.addResource(_fs.open(new Path(confFile)), confFile);
  Properties jobConfProperties = new Properties();
  for (Map.Entry<String, String> entry : jobConf) {
    jobConfProperties.put(entry.getKey(), entry.getValue());
  }
  jobData.setJobConf(jobConfProperties);

  // Check if job history file is too large and should be throttled
  if (_fs.getFileStatus(new Path(histFile)).getLen() > _maxLogSizeInMB * FileUtils.ONE_MB) {
    String errMsg = "The history log of MapReduce application: " + appId + " is over the limit size of "
            + _maxLogSizeInMB + " MB, the parsing process gets throttled.";
    logger.warn(errMsg);
    jobData.setDiagnosticInfo(errMsg);
    jobData.setSucceeded(false);  // set succeeded to false to avoid heuristic analysis
    return jobData;
  }

  // Analyze job history file
  JobHistoryParser parser = new JobHistoryParser(_fs, histFile);
  JobHistoryParser.JobInfo jobInfo = parser.parse();
  IOException parseException = parser.getParseException();
  if (parseException != null) {
    throw new RuntimeException("Could not parse history file " + histFile, parseException);
  }

  jobData.setSubmitTime(jobInfo.getSubmitTime());
  jobData.setStartTime(jobInfo.getLaunchTime());
  jobData.setFinishTime(jobInfo.getFinishTime());

  String state = jobInfo.getJobStatus();
  if (state.equals("SUCCEEDED")) {
    jobData.setSucceeded(true);
  }
  else if (state.equals("FAILED")) {
    jobData.setSucceeded(false);
    jobData.setDiagnosticInfo(jobInfo.getErrorInfo());
  } else {
    throw new RuntimeException("job neither succeeded or failed. can not process it ");
  }


  // Fetch job counter
  MapReduceCounterData jobCounter = getCounterData(jobInfo.getTotalCounters());

  // Fetch task data
  Map<TaskID, JobHistoryParser.TaskInfo> allTasks = jobInfo.getAllTasks();
  List<JobHistoryParser.TaskInfo> mapperInfoList = new ArrayList<JobHistoryParser.TaskInfo>();
  List<JobHistoryParser.TaskInfo> reducerInfoList = new ArrayList<JobHistoryParser.TaskInfo>();
  for (JobHistoryParser.TaskInfo taskInfo : allTasks.values()) {
    if (taskInfo.getTaskType() == TaskType.MAP) {
      mapperInfoList.add(taskInfo);
    } else {
      reducerInfoList.add(taskInfo);
    }
  }
  if (jobInfo.getTotalMaps() > MAX_SAMPLE_SIZE) {
    logger.debug(jobId + " total mappers: " + mapperInfoList.size());
  }
  if (jobInfo.getTotalReduces() > MAX_SAMPLE_SIZE) {
    logger.debug(jobId + " total reducers: " + reducerInfoList.size());
  }
  MapReduceTaskData[] mapperList = getTaskData(jobId, mapperInfoList);
  MapReduceTaskData[] reducerList = getTaskData(jobId, reducerInfoList);

  jobData.setCounters(jobCounter).setMapperData(mapperList).setReducerData(reducerList);

  return jobData;
}
 
Developer: linkedin, Project: dr-elephant, Lines: 82, Source: MapReduceFSFetcherHadoop2.java

Example 13: testHistoryParsingForFailedAttempts

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
@Test(timeout = 30000)
public void testHistoryParsingForFailedAttempts() throws Exception {
  LOG.info("STARTING testHistoryParsingForFailedAttempts");
  try {
    Configuration conf = new Configuration();
    conf.setClass(
        CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        MyResolver.class, DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistoryWithFailedAttempt(2, 1, true, this
        .getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    app.waitForState(job, JobState.SUCCEEDED);

    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);

    String jobhistoryDir = JobHistoryUtils
        .getHistoryIntermediateDoneDirForUser(conf);
    JobHistory jobHistory = new JobHistory();
    jobHistory.init(conf);

    JobIndexInfo jobIndexInfo = jobHistory.getJobFileInfo(jobId)
        .getJobIndexInfo();
    String jobhistoryFileName = FileNameIndexUtils
        .getDoneFileName(jobIndexInfo);

    Path historyFilePath = new Path(jobhistoryDir, jobhistoryFileName);
    FSDataInputStream in = null;
    FileContext fc = null;
    try {
      fc = FileContext.getFileContext(conf);
      in = fc.open(fc.makeQualified(historyFilePath));
    } catch (IOException ioe) {
      LOG.info("Can not open history file: " + historyFilePath, ioe);
      throw (new Exception("Can not open History File"));
    }

    JobHistoryParser parser = new JobHistoryParser(in);
    JobInfo jobInfo = parser.parse();
    Exception parseException = parser.getParseException();
    Assert.assertNull("Caught an expected exception " + parseException,
        parseException);
    int noOffailedAttempts = 0;
    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
            TypeConverter.fromYarn((taskAttempt.getID())));
        // Verify rack-name for all task attempts
        Assert.assertEquals("rack-name is incorrect",
            taskAttemptInfo.getRackname(), RACK_NAME);
        if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
          noOffailedAttempts++;
        }
      }
    }
    Assert.assertEquals("No of Failed tasks doesn't match.", 2,
        noOffailedAttempts);
  } finally {
    LOG.info("FINISHED testHistoryParsingForFailedAttempts");
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 67, Source: TestJobHistoryParsing.java

Example 14: testCountersForFailedTask

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
@Test(timeout = 60000)
public void testCountersForFailedTask() throws Exception {
  LOG.info("STARTING testCountersForFailedTask");
  try {
    Configuration conf = new Configuration();
    conf.setClass(
        CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        MyResolver.class, DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistoryWithFailedTask(2, 1, true, this
        .getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    app.waitForState(job, JobState.FAILED);

    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);

    String jobhistoryDir = JobHistoryUtils
        .getHistoryIntermediateDoneDirForUser(conf);
    JobHistory jobHistory = new JobHistory();
    jobHistory.init(conf);

    JobIndexInfo jobIndexInfo = jobHistory.getJobFileInfo(jobId)
        .getJobIndexInfo();
    String jobhistoryFileName = FileNameIndexUtils
        .getDoneFileName(jobIndexInfo);

    Path historyFilePath = new Path(jobhistoryDir, jobhistoryFileName);
    FSDataInputStream in = null;
    FileContext fc = null;
    try {
      fc = FileContext.getFileContext(conf);
      in = fc.open(fc.makeQualified(historyFilePath));
    } catch (IOException ioe) {
      LOG.info("Can not open history file: " + historyFilePath, ioe);
      throw (new Exception("Can not open History File"));
    }

    JobHistoryParser parser = new JobHistoryParser(in);
    JobInfo jobInfo = parser.parse();
    Exception parseException = parser.getParseException();
    Assert.assertNull("Caught an expected exception " + parseException,
        parseException);
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      CompletedTask ct = new CompletedTask(yarnTaskID, entry.getValue());
      Assert.assertNotNull("completed task report has null counters", ct
          .getReport().getCounters());
    }
  } finally {
    LOG.info("FINISHED testCountersForFailedTask");
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 56, Source: TestJobHistoryParsing.java

Example 15: validateJobHistoryFileFormat

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the package/class this method depends on
/**
 *  Validates the format of contents of history file
 *  (1) history file exists and in correct location
 *  (2) Verify if the history file is parsable
 *  (3) Validate the contents of history file
 *     (a) Format of all TIMEs are checked against a regex
 *     (b) validate legality/format of job level key, values
 *     (c) validate legality/format of task level key, values
 *     (d) validate legality/format of attempt level key, values
 *     (e) check if all the TaskAttempts, Tasks started are finished.
 *         Check finish of each TaskAttemptID against its start to make sure
 *         that all TaskAttempts, Tasks started are indeed finished and the
 *         history log lines are in the proper order.
 *         We want to catch ordering of history lines like
 *            Task START
 *            Attempt START
 *            Task FINISH
 *            Attempt FINISH
 *         (speculative execution is turned off for this).
 * @param id job id
 * @param conf job conf
 */
public static void validateJobHistoryFileFormat(JobHistory jobHistory,
    JobID id, JobConf conf,
               String status, boolean splitsCanBeEmpty) throws IOException  {

  // Get the history file name
  Path dir = jobHistory.getCompletedJobHistoryLocation();
  String logFileName = getDoneFile(jobHistory, conf, id, dir);

  // Framework history log file location
  Path logFile = new Path(dir, logFileName);
  FileSystem fileSys = logFile.getFileSystem(conf);
 
  // Check if the history file exists
  assertTrue("History file does not exist", fileSys.exists(logFile));

  JobHistoryParser parser = new JobHistoryParser(fileSys, 
      logFile.toUri().getPath());
  JobHistoryParser.JobInfo jobInfo = parser.parse();

  // validate format of job level key, values
  validateJobLevelKeyValuesFormat(jobInfo, status);

  // validate format of task level key, values
  validateTaskLevelKeyValuesFormat(jobInfo, splitsCanBeEmpty);

  // validate format of attempt level key, values
  validateTaskAttemptLevelKeyValuesFormat(jobInfo);

  // check if all the TaskAttempts, Tasks started are finished for
  // successful jobs
  if (status.equals("SUCCEEDED")) {
    // Make sure that the lists in taskIDsToAttemptIDs are empty.
    for(Iterator<String> it = 
      taskIDsToAttemptIDs.keySet().iterator();it.hasNext();) {
      String taskid = it.next();
      assertTrue("There are some Tasks which are not finished in history " +
                 "file.", taskEnds.contains(taskid));
      List<String> attemptIDs = taskIDsToAttemptIDs.get(taskid);
      if(attemptIDs != null) {
        assertTrue("Unexpected. TaskID " + taskid + " has task attempt(s)" +
                   " that are not finished.", (attemptIDs.size() == 1));
      }
    }
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 68, Source: TestJobHistory.java


Note: The org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.parse method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and the copyright of the source code remains with the original authors. Please consult the corresponding project's License before reusing or redistributing the code; do not republish without permission.