

Java JobHistoryParser Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser. If you are wondering what the JobHistoryParser class does, how to use it, or what example usage looks like, the curated class code examples below may help.


The JobHistoryParser class belongs to the org.apache.hadoop.mapreduce.jobhistory package. Twelve code examples of the class are shown below, ordered by popularity by default.
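
Before the individual examples, here is a minimal, self-contained sketch of the flow most of these snippets share: open a .jhist job history file, run JobHistoryParser.parse(), and read the resulting JobInfo. The class name JobHistoryParserDemo and the local file path are placeholders introduced only for illustration; they are not taken from any of the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;

public class JobHistoryParserDemo {  // hypothetical class name, for illustration only
  public static void main(String[] args) throws Exception {
    // Placeholder path to a MapReduce job history (.jhist) file.
    Path histPath = new Path("/tmp/sample.jhist");
    FileSystem fs = FileSystem.getLocal(new Configuration());
    JobHistoryParser parser = new JobHistoryParser(fs, histPath);
    JobInfo info = parser.parse();  // replays the recorded history events into a JobInfo
    System.out.println("jobId=" + info.getJobId()
        + ", name=" + info.getJobname()
        + ", maps=" + info.getFinishedMaps() + "/" + info.getTotalMaps()
        + ", status=" + info.getJobStatus());
  }
}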

Example 1: testFailedJobHistoryWithoutDiagnostics

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
@Test
public void testFailedJobHistoryWithoutDiagnostics() throws Exception {
  final Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_1393307629410_0001-1393307687476-user-Sleep+job-1393307723835-0-0-FAILED-default-1393307693920.jhist")
      .getFile());
  final FileSystem lfs = FileSystem.getLocal(new Configuration());
  final FSDataInputStream fsdis = lfs.open(histPath);
  try {
    JobHistoryParser parser = new JobHistoryParser(fsdis);
    JobInfo info = parser.parse();
    assertEquals("History parsed jobId incorrectly",
        info.getJobId(), JobID.forName("job_1393307629410_0001") );
    assertEquals("Default diagnostics incorrect ", "", info.getErrorInfo());
  } finally {
    fsdis.close();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 18, Source file: TestJobHistoryParsing.java

Example 2: getTaskExecTime

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
private long[] getTaskExecTime(JobHistoryParser.TaskAttemptInfo attempInfo) {
  long startTime = attempInfo.getStartTime();
  long finishTime = attempInfo.getFinishTime();
  boolean isMapper = (attempInfo.getTaskType() == TaskType.MAP);

  // Returned layout: {total run time, shuffle time, sort/merge time, start time, finish time}.
  long[] time;
  if (isMapper) {
    // Map attempts have no shuffle or merge phase, so those slots stay zero.
    time = new long[]{finishTime - startTime, 0, 0, startTime, finishTime};
  } else {
    long shuffleFinishTime = attempInfo.getShuffleFinishTime();
    long mergeFinishTime = attempInfo.getSortFinishTime();
    time = new long[]{finishTime - startTime, shuffleFinishTime - startTime,
            mergeFinishTime - shuffleFinishTime, startTime, finishTime};
  }
  return time;
}
 
Developer: linkedin, Project: dr-elephant, Lines of code: 17, Source file: MapReduceFSFetcherHadoop2.java

Example 3: testCompletedJobWithDiagnostics

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
@Test (timeout=30000)
public void testCompletedJobWithDiagnostics() throws Exception {
  final String jobError = "Job Diagnostics";
  JobInfo jobInfo = spy(new JobInfo());
  when(jobInfo.getErrorInfo()).thenReturn(jobError);
  when(jobInfo.getJobStatus()).thenReturn(JobState.FAILED.toString());
  when(jobInfo.getAMInfos()).thenReturn(Collections.<JobHistoryParser.AMInfo>emptyList());
  final JobHistoryParser mockParser = mock(JobHistoryParser.class);
  when(mockParser.parse()).thenReturn(jobInfo);
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  when(info.getHistoryFile()).thenReturn(fullHistoryPath);
  CompletedJob job =
    new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
        info, jobAclsManager) {
          @Override
          protected JobHistoryParser createJobHistoryParser(
              Path historyFileAbsolute) throws IOException {
             return mockParser;
          }
  };
  assertEquals(jobError, job.getReport().getDiagnostics());
}
 
Developer: hopshadoop, Project: hops, Lines of code: 24, Source file: TestJobHistoryEntities.java

Example 4: getSimulatedJobHistory

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
/**
 * Get the simulated job history of a job.
 * @param simulatedJobID - simulated job id.
 * @return - simulated job information.
 * @throws IOException - if an I/O error occurs.
 */
public JobHistoryParser.JobInfo getSimulatedJobHistory(JobID simulatedJobID) 
    throws IOException {
  FileSystem fs = null;
  try {
    String historyFilePath = jtClient.getProxy().
        getJobHistoryLocationForRetiredJob(simulatedJobID);
    Path jhpath = new Path(historyFilePath);
    fs = jhpath.getFileSystem(conf);
    JobHistoryParser jhparser = new JobHistoryParser(fs, jhpath);
    JobHistoryParser.JobInfo jhInfo = jhparser.parse();
    return jhInfo;

  } finally {
    if (fs != null) { // fs stays null if resolving the history location failed above
      fs.close();
    }
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines of code: 23, Source file: GridmixJobVerification.java

Example 5: getAMInfos

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
@Override
public List<AMInfo> getAMInfos() {
  List<AMInfo> amInfos = new LinkedList<AMInfo>();
  for (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.AMInfo jhAmInfo : jobInfo
      .getAMInfos()) {
    AMInfo amInfo =
        MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
            jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
            jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
            jhAmInfo.getNodeManagerHttpPort());
 
    amInfos.add(amInfo);
  }
  return amInfos;
}
 
Developer: naver, Project: hadoop, Lines of code: 16, Source file: CompletedJob.java

Example 6: testTaskAttemptUnsuccessfulCompletionWithoutCounters203

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
/**
 * Test compatibility of JobHistoryParser with 2.0.3-alpha history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters203() throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_2.0.3-alpha-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " "
      + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
 
Developer: naver, Project: hadoop, Lines of code: 18, Source file: TestJobHistoryParsing.java

Example 7: testTaskAttemptUnsuccessfulCompletionWithoutCounters240

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
/**
 * Test compatibility of JobHistoryParser with 2.4.0 history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters240() throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_2.4.0-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " "
      + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
 
Developer: naver, Project: hadoop, Lines of code: 18, Source file: TestJobHistoryParsing.java

Example 8: testTaskAttemptUnsuccessfulCompletionWithoutCounters0239

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
/**
 * Test compatibility of JobHistoryParser with 0.23.9 history files
 * @throws IOException
 */
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters0239() throws IOException {
  Path histPath = new Path(getClass().getClassLoader().getResource(
      "job_0.23.9-FAILED.jhist").getFile());
  JobHistoryParser parser = new JobHistoryParser(
      FileSystem.getLocal(new Configuration()), histPath);
  JobInfo jobInfo = parser.parse();
  LOG.info(" job info: " + jobInfo.getJobname() + " "
      + jobInfo.getFinishedMaps() + " "
      + jobInfo.getTotalMaps() + " "
      + jobInfo.getJobId());
}
 
Developer: naver, Project: hadoop, Lines of code: 18, Source file: TestJobHistoryParsing.java

Example 9: getTaskData

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
protected MapReduceTaskData[] getTaskData(String jobId, List<JobHistoryParser.TaskInfo> infoList) {
  int sampleSize = sampleAndGetSize(jobId, infoList);

  List<MapReduceTaskData> taskList = new ArrayList<MapReduceTaskData>();
  for (int i = 0; i < sampleSize; i++) {
    JobHistoryParser.TaskInfo tInfo = infoList.get(i);

    String taskId = tInfo.getTaskId().toString();
    TaskAttemptID attemptId = null;
    if (tInfo.getTaskStatus().equals("SUCCEEDED")) {
      attemptId = tInfo.getSuccessfulAttemptId();
    } else {
      attemptId = tInfo.getFailedDueToAttemptId();
    }

    MapReduceTaskData taskData = new MapReduceTaskData(
        taskId, attemptId == null ? "" : attemptId.toString(), tInfo.getTaskStatus());

    MapReduceCounterData taskCounterData = getCounterData(tInfo.getCounters());

    long[] taskExecTime = null;
    if (attemptId != null) {
      taskExecTime = getTaskExecTime(tInfo.getAllTaskAttempts().get(attemptId));
    }

    taskData.setTimeAndCounter(taskExecTime, taskCounterData);
    taskList.add(taskData);
  }
  return taskList.toArray(new MapReduceTaskData[taskList.size()]);
}
 
Developer: linkedin, Project: dr-elephant, Lines of code: 30, Source file: MapReduceFSFetcherHadoop2.java
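
A note on where the infoList argument in Example 9 usually comes from: after parsing a history file, the per-task entries can be pulled out of the JobInfo. The helper below is a hedged sketch that assumes the JobInfo.getAllTasks() accessor of the Hadoop 2.x job-history API (it does not appear in the snippet above); the method name toTaskInfoList is made up for illustration.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;

// Hedged sketch: collect the per-task history entries of a parsed job into the
// List<TaskInfo> shape that getTaskData(jobId, infoList) expects.
private List<JobHistoryParser.TaskInfo> toTaskInfoList(JobHistoryParser.JobInfo jobInfo) {
  // getAllTasks() is assumed here; it returns the job's map of TaskID -> TaskInfo.
  return new ArrayList<JobHistoryParser.TaskInfo>(jobInfo.getAllTasks().values());
}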

Example 10: testGetTaskData

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
@Test
public void testGetTaskData() {
  FetcherConfiguration fetcherConf = new FetcherConfiguration(document9.getDocumentElement());

  try {
    MapReduceFSFetcherHadoop2 fetcher = new MapReduceFSFetcherHadoop2(
            fetcherConf.getFetchersConfigurationData().get(0));
    String jobId = "job_14000_001";
    List<JobHistoryParser.TaskInfo> infoList = new ArrayList<JobHistoryParser.TaskInfo>();
    infoList.add(new MockTaskInfo(1, true));
    infoList.add(new MockTaskInfo(2, false));

    MapReduceTaskData[] taskList = fetcher.getTaskData(jobId, infoList);
    Assert.assertNotNull("taskList should not be null.", taskList);
    int succeededTaskCount = 0;
    for (MapReduceTaskData task : taskList) {
      Assert.assertNotNull("Null pointer in taskList.", task);
      if(task.getState().equals("SUCCEEDED")) {
        succeededTaskCount++;
      }
    }
    Assert.assertEquals("Should have total two tasks.", 2, taskList.length);
    Assert.assertEquals("Should have only one succeeded task.", 1, succeededTaskCount);
  } catch (IOException e) {
    Assert.assertNull("Failed to initialize FileSystem.", e);
  }
}
 
Developer: linkedin, Project: dr-elephant, Lines of code: 28, Source file: MapReduceFSFetcherHadoop2Test.java

Example 11: MockTaskInfo

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
public MockTaskInfo(int id, boolean succeeded) {
  this.taskId = new TaskID("job1", 1, TaskType.MAP, id);
  this.taskType = TaskType.MAP;
  this.succeeded = succeeded;
  this.counters = new Counters();
  this.finishTime = System.currentTimeMillis();
  this.startTime = finishTime - 10000;
  this.failedDueToAttemptId = new TaskAttemptID(taskId, 0);
  this.successfulAttemptId = new TaskAttemptID(taskId, 1);
  this.attemptsMap = new HashMap<TaskAttemptID, JobHistoryParser.TaskAttemptInfo>();
  this.attemptsMap.put(failedDueToAttemptId, new JobHistoryParser.TaskAttemptInfo());
  this.attemptsMap.put(successfulAttemptId, new JobHistoryParser.TaskAttemptInfo());
}
 
Developer: linkedin, Project: dr-elephant, Lines of code: 14, Source file: MapReduceFSFetcherHadoop2Test.java

Example 12: loadFullHistoryData

import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; // import the required package/class
protected synchronized void loadFullHistoryData(boolean loadTasks,
    Path historyFileAbsolute) throws IOException {
  LOG.info("Loading history file: [" + historyFileAbsolute + "]");
  if (this.jobInfo != null) {
    return;
  }
  
  if (historyFileAbsolute != null) {
    JobHistoryParser parser = null;
    try {
      parser = createJobHistoryParser(historyFileAbsolute);
      this.jobInfo = parser.parse();
    } catch (IOException e) {
      throw new YarnRuntimeException("Could not load history file "
          + historyFileAbsolute, e);
    }
    IOException parseException = parser.getParseException(); 
    if (parseException != null) {
      throw new YarnRuntimeException(
          "Could not parse history file " + historyFileAbsolute, 
          parseException);
    }
  } else {
    throw new IOException("History file not found");
  }
  if (loadTasks) {
    loadAllTasks();
    LOG.info("TaskInfo loaded");
  }    
}
 
Developer: hopshadoop, Project: hops, Lines of code: 31, Source file: CompletedJob.java
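
Example 12 highlights that parse() may return a JobInfo even when the history file could only be read partially, which is why it also checks getParseException(). Below is a minimal stand-alone sketch of the same defensive pattern; the helper name parseStrictly and the way the FileSystem is obtained are illustrative choices, not part of the example.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;

// Hedged sketch of the "parse, then check getParseException()" pattern.
public static JobHistoryParser.JobInfo parseStrictly(Path histPath) throws IOException {
  FileSystem fs = histPath.getFileSystem(new Configuration());
  JobHistoryParser parser = new JobHistoryParser(fs, histPath);
  JobHistoryParser.JobInfo info = parser.parse();
  IOException parseError = parser.getParseException();
  if (parseError != null) {
    // Treat a partially parsed history file as a failure instead of returning partial data.
    throw new IOException("Could not fully parse history file " + histPath, parseError);
  }
  return info;
}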


Note: The org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and the copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.