

Java JobIndexInfo Class Code Examples

This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo. If you have been wondering what JobIndexInfo is for, how to use it, or where to find usage examples, the curated code samples below should help.


The JobIndexInfo class belongs to the org.apache.hadoop.mapreduce.v2.jobhistory package. Ten code examples of the class are shown below, sorted by popularity by default.
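
Before diving into the examples, here is a minimal sketch of how the class fits together, pieced together from the constructor and setters that appear in the snippets below. The getDoneFileName call and the getSubmitTime/getUser getters are assumptions about the jobhistory utilities that are not shown in the examples themselves, and the field values ("wordcount", "default", "SUCCEEDED") are purely illustrative.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;

public class JobIndexInfoSketch {
  public static void main(String[] args) throws Exception {
    // Convert a classic job ID string into a YARN JobId, as Example 7 does.
    JobId jobId = TypeConverter.toYarn(JobID.forName("job_1410889000000_123456"));

    // Constructor arguments mirror Examples 3 and 4: submit time, finish time,
    // user, job name, job id, completed maps, completed reduces, job status.
    JobIndexInfo info = new JobIndexInfo(0L, System.currentTimeMillis(),
        "user", "wordcount", jobId, 3, 2, "SUCCEEDED");
    info.setQueueName("default");               // optional fields, as in Example 4
    info.setJobStartTime(info.getSubmitTime()); // getter name assumed from the field

    // Encode the record into a history file name and parse it back, which is
    // what HistoryFileManager relies on when scanning the done directory.
    // getDoneFileName is assumed here; only getIndexInfo appears in the examples.
    String doneFileName = FileNameIndexUtils.getDoneFileName(info);
    JobIndexInfo parsed = FileNameIndexUtils.getIndexInfo(doneFileName);
    System.out.println(parsed.getJobId() + " submitted by " + parsed.getUser());
  }
}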

Example 1: addDirectoryToJobListCache

import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; // import the required package/class
private void addDirectoryToJobListCache(Path path) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Adding " + path + " to job list cache.");
  }
  List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(path,
      doneDirFc);
  for (FileStatus fs : historyFileList) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Adding in history for " + fs.getPath());
    }
    JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
        .getName());
    String confFileName = JobHistoryUtils
        .getIntermediateConfFileName(jobIndexInfo.getJobId());
    String summaryFileName = JobHistoryUtils
        .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
    HistoryFileInfo fileInfo = createHistoryFileInfo(fs.getPath(), new Path(fs
        .getPath().getParent(), confFileName), new Path(fs.getPath()
        .getParent(), summaryFileName), jobIndexInfo, true);
    jobListCache.addIfAbsent(fileInfo);
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: HistoryFileManager.java

Example 2: getJobFileInfo

import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; // import the required package/class
/**
 * Searches the job history file FileStatus list for the specified JobId.
 * 
 * @param fileStatusList
 *          fileStatus list of Job History Files.
 * @param jobId
 *          The JobId to find.
 * @return A FileInfo object for the jobId, null if not found.
 * @throws IOException
 */
private HistoryFileInfo getJobFileInfo(List<FileStatus> fileStatusList,
    JobId jobId) throws IOException {
  for (FileStatus fs : fileStatusList) {
    JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
        .getName());
    if (jobIndexInfo.getJobId().equals(jobId)) {
      String confFileName = JobHistoryUtils
          .getIntermediateConfFileName(jobIndexInfo.getJobId());
      String summaryFileName = JobHistoryUtils
          .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
      HistoryFileInfo fileInfo = createHistoryFileInfo(fs.getPath(), new Path(
          fs.getPath().getParent(), confFileName), new Path(fs.getPath()
          .getParent(), summaryFileName), jobIndexInfo, true);
      return fileInfo;
    }
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: HistoryFileManager.java

Example 3: testPartialJob

import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; // import the required package/class
/**
 * Simple test of PartialJob.
 */
@Test(timeout = 3000)
public void testPartialJob() throws Exception {
  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  JobIndexInfo jii = new JobIndexInfo(0L, System.currentTimeMillis(), "user",
      "jobName", jobId, 3, 2, "JobStatus");
  PartialJob test = new PartialJob(jii, jobId);
  assertEquals(1.0f, test.getProgress(), 0.001);
  assertNull(test.getAllCounters());
  assertNull(test.getTasks());
  assertNull(test.getTasks(TaskType.MAP));
  assertNull(test.getTask(new TaskIdPBImpl()));

  assertNull(test.getTaskAttemptCompletionEvents(0, 100));
  assertNull(test.getMapAttemptCompletionEvents(0, 100));
  assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(), null));
  assertNull(test.getAMInfos());

}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestJobHistoryParsing.java

Example 4: split

import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; // import the required package/class
private static JobsPair split(Map<JobId, Job> mocked) throws IOException {
  JobsPair ret = new JobsPair();
  ret.full = Maps.newHashMap();
  ret.partial = Maps.newHashMap();
  for(Map.Entry<JobId, Job> entry: mocked.entrySet()) {
    JobId id = entry.getKey();
    Job j = entry.getValue();
    MockCompletedJob mockJob = new MockCompletedJob(j);
    // use MockCompletedJob to set everything below, to make sure it is
    // consistent with what the history server would do
    ret.full.put(id, mockJob);
    JobReport report = mockJob.getReport();
    JobIndexInfo info = new JobIndexInfo(report.getStartTime(), 
        report.getFinishTime(), mockJob.getUserName(), mockJob.getName(), id, 
        mockJob.getCompletedMaps(), mockJob.getCompletedReduces(),
        String.valueOf(mockJob.getState()));
    info.setJobStartTime(report.getStartTime());
    info.setQueueName(mockJob.getQueueName());
    ret.partial.put(id, new PartialJob(info, id));

  }
  return ret;
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: MockHistoryJobs.java

Example 5: addDirectoryToJobListCache

import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; // import the required package/class
private void addDirectoryToJobListCache(Path path) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Adding " + path + " to job list cache.");
  }
  List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(path,
      doneDirFc);
  for (FileStatus fs : historyFileList) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Adding in history for " + fs.getPath());
    }
    JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
        .getName());
    String confFileName = JobHistoryUtils
        .getIntermediateConfFileName(jobIndexInfo.getJobId());
    String summaryFileName = JobHistoryUtils
        .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
    HistoryFileInfo fileInfo = new HistoryFileInfo(fs.getPath(), new Path(fs
        .getPath().getParent(), confFileName), new Path(fs.getPath()
        .getParent(), summaryFileName), jobIndexInfo, true);
    jobListCache.addIfAbsent(fileInfo);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: HistoryFileManager.java

Example 6: getJobFileInfo

import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; // import the required package/class
/**
 * Searches the job history file FileStatus list for the specified JobId.
 * 
 * @param fileStatusList
 *          fileStatus list of Job History Files.
 * @param jobId
 *          The JobId to find.
 * @return A FileInfo object for the jobId, null if not found.
 * @throws IOException
 */
private HistoryFileInfo getJobFileInfo(List<FileStatus> fileStatusList,
    JobId jobId) throws IOException {
  for (FileStatus fs : fileStatusList) {
    JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
        .getName());
    if (jobIndexInfo.getJobId().equals(jobId)) {
      String confFileName = JobHistoryUtils
          .getIntermediateConfFileName(jobIndexInfo.getJobId());
      String summaryFileName = JobHistoryUtils
          .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
      HistoryFileInfo fileInfo = new HistoryFileInfo(fs.getPath(), new Path(
          fs.getPath().getParent(), confFileName), new Path(fs.getPath()
          .getParent(), summaryFileName), jobIndexInfo, true);
      return fileInfo;
    }
  }
  return null;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source: HistoryFileManager.java

Example 7: testHistoryFileInfoSummaryFileNotExist

import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; // import the required package/class
@Test
public void testHistoryFileInfoSummaryFileNotExist() throws Exception {
  HistoryFileManagerTest hmTest = new HistoryFileManagerTest();
  String job = "job_1410889000000_123456";
  Path summaryFile = new Path(job + ".summary");
  JobIndexInfo jobIndexInfo = new JobIndexInfo();
  jobIndexInfo.setJobId(TypeConverter.toYarn(JobID.forName(job)));
  Configuration conf = dfsCluster.getConfiguration(0);
  conf.set(JHAdminConfig.MR_HISTORY_DONE_DIR,
      "/" + UUID.randomUUID());
  conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,
      "/" + UUID.randomUUID());
  hmTest.serviceInit(conf);
  HistoryFileInfo info = hmTest.getHistoryFileInfo(null, null,
      summaryFile, jobIndexInfo, false);
  info.moveToDone();
  Assert.assertFalse(info.didMoveFail());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source: TestHistoryFileManager.java

Example 8: testPartialJob

import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; // import the required package/class
/**
 * Simple test of PartialJob.
 */
@Test(timeout = 3000)
public void testPartialJob() throws Exception {
  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  JobIndexInfo jii = new JobIndexInfo(0L, System.currentTimeMillis(), "user",
      "jobName", jobId, 3, 2, "JobStatus");
  PartialJob test = new PartialJob(jii, jobId);
  
  Assert.assertEquals(1.0f, test.getProgress(), 0.001f);
  assertNull(test.getAllCounters());
  assertNull(test.getTasks());
  assertNull(test.getTasks(TaskType.MAP));
  assertNull(test.getTask(new TaskIdPBImpl()));

  assertNull(test.getTaskAttemptCompletionEvents(0, 100));
  assertNull(test.getMapAttemptCompletionEvents(0, 100));
  assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(), null));
  assertNull(test.getAMInfos());

}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: TestJobHistoryParsing.java

Example 9: testPartialJob

import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; // import the required package/class
/**
 * Simple test of PartialJob.
 */
@Test(timeout = 1000)
public void testPartialJob() throws Exception {
  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  JobIndexInfo jii = new JobIndexInfo(0L, System.currentTimeMillis(), "user",
      "jobName", jobId, 3, 2, "JobStatus");
  PartialJob test = new PartialJob(jii, jobId);
  assertEquals(1.0f, test.getProgress(), 0.001);
  assertNull(test.getAllCounters());
  assertNull(test.getTasks());
  assertNull(test.getTasks(TaskType.MAP));
  assertNull(test.getTask(new TaskIdPBImpl()));

  assertNull(test.getTaskAttemptCompletionEvents(0, 100));
  assertNull(test.getMapAttemptCompletionEvents(0, 100));
  assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(), null));
  assertNull(test.getAMInfos());

}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 23, Source: TestJobHistoryParsing.java

Example 10: split

import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; // import the required package/class
private static JobsPair split(Map<JobId, Job> mocked) throws IOException {
  JobsPair ret = new JobsPair();
  ret.full = Maps.newHashMap();
  ret.partial = Maps.newHashMap();
  for(Map.Entry<JobId, Job> entry: mocked.entrySet()) {
    JobId id = entry.getKey();
    Job j = entry.getValue();
    MockCompletedJob mockJob = new MockCompletedJob(j);
    // use MockCompletedJob to set everything below, to make sure it is
    // consistent with what the history server would do
    ret.full.put(id, mockJob);
    JobReport report = mockJob.getReport();
    JobIndexInfo info = new JobIndexInfo(report.getStartTime(), 
        report.getFinishTime(), mockJob.getUserName(), mockJob.getName(), id, 
        mockJob.getCompletedMaps(), mockJob.getCompletedReduces(),
        String.valueOf(mockJob.getState()));
    info.setQueueName(mockJob.getQueueName());
    ret.partial.put(id, new PartialJob(info, id));

  }
  return ret;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 23, Source: MockHistoryJobs.java


Note: The org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with those authors; please consult each project's License before distributing or using the code, and do not republish without permission.