

Java FileNameIndexUtils.getIndexInfo Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils.getIndexInfo. If you are wondering what FileNameIndexUtils.getIndexInfo does, how to use it, or where to find examples of it in practice, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils.


The following presents 7 code examples of the FileNameIndexUtils.getIndexInfo method, sorted by popularity by default.
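Before the examples, here is a minimal standalone sketch of calling FileNameIndexUtils.getIndexInfo directly. The .jhist file name below is hypothetical and only illustrates the delimiter-separated fields assumed here (jobId, submit time, user, job name, finish time, map/reduce counts, status, queue) that the method decodes into a JobIndexInfo; in practice such names are generated by the MapReduce JobHistory service rather than written by hand.

import java.io.IOException;

import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;

public class GetIndexInfoDemo {
  public static void main(String[] args) throws IOException {
    // Hypothetical history file name, for illustration only; the assumed field order is
    // jobId-submitTime-user-jobName-finishTime-numMaps-numReduces-status-queue.
    String historyFileName = "job_1393307629410_0001-1393307687476-hadoop-wordcount"
        + "-1393307723345-1-1-SUCCEEDED-default.jhist";

    // getIndexInfo decodes the fields embedded in the file name into a JobIndexInfo
    // without opening the file itself.
    JobIndexInfo info = FileNameIndexUtils.getIndexInfo(historyFileName);

    System.out.println("jobId      = " + info.getJobId());
    System.out.println("user       = " + info.getUser());
    System.out.println("jobName    = " + info.getJobName());
    System.out.println("finishTime = " + info.getFinishTime());
    System.out.println("status     = " + info.getJobStatus());
  }
}

All of the examples that follow use the same call, only on file names discovered by scanning the job history "done" directories.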

Example 1: addDirectoryToJobListCache

import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils; // import the package/class the method depends on
private void addDirectoryToJobListCache(Path path) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Adding " + path + " to job list cache.");
  }
  List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(path,
      doneDirFc);
  for (FileStatus fs : historyFileList) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Adding in history for " + fs.getPath());
    }
    JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
        .getName());
    String confFileName = JobHistoryUtils
        .getIntermediateConfFileName(jobIndexInfo.getJobId());
    String summaryFileName = JobHistoryUtils
        .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
    HistoryFileInfo fileInfo = createHistoryFileInfo(fs.getPath(), new Path(fs
        .getPath().getParent(), confFileName), new Path(fs.getPath()
        .getParent(), summaryFileName), jobIndexInfo, true);
    jobListCache.addIfAbsent(fileInfo);
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 23, Source file: HistoryFileManager.java

Example 2: getJobFileInfo

import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils; // import the package/class the method depends on
/**
 * Searches the job history file FileStatus list for the specified JobId.
 * 
 * @param fileStatusList
 *          fileStatus list of Job History Files.
 * @param jobId
 *          The JobId to find.
 * @return A FileInfo object for the jobId, null if not found.
 * @throws IOException
 */
private HistoryFileInfo getJobFileInfo(List<FileStatus> fileStatusList,
    JobId jobId) throws IOException {
  for (FileStatus fs : fileStatusList) {
    JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
        .getName());
    if (jobIndexInfo.getJobId().equals(jobId)) {
      String confFileName = JobHistoryUtils
          .getIntermediateConfFileName(jobIndexInfo.getJobId());
      String summaryFileName = JobHistoryUtils
          .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
      HistoryFileInfo fileInfo = createHistoryFileInfo(fs.getPath(), new Path(
          fs.getPath().getParent(), confFileName), new Path(fs.getPath()
          .getParent(), summaryFileName), jobIndexInfo, true);
      return fileInfo;
    }
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines of code: 29, Source file: HistoryFileManager.java

Example 3: addDirectoryToJobListCache

import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils; // import the package/class the method depends on
private void addDirectoryToJobListCache(Path path) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Adding " + path + " to job list cache.");
  }
  List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(path,
      doneDirFc);
  for (FileStatus fs : historyFileList) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Adding in history for " + fs.getPath());
    }
    JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
        .getName());
    String confFileName = JobHistoryUtils
        .getIntermediateConfFileName(jobIndexInfo.getJobId());
    String summaryFileName = JobHistoryUtils
        .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
    HistoryFileInfo fileInfo = new HistoryFileInfo(fs.getPath(), new Path(fs
        .getPath().getParent(), confFileName), new Path(fs.getPath()
        .getParent(), summaryFileName), jobIndexInfo, true);
    jobListCache.addIfAbsent(fileInfo);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 23, Source file: HistoryFileManager.java

Example 4: getJobFileInfo

import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils; // import the package/class the method depends on
/**
 * Searches the job history file FileStatus list for the specified JobId.
 * 
 * @param fileStatusList
 *          fileStatus list of Job History Files.
 * @param jobId
 *          The JobId to find.
 * @return A FileInfo object for the jobId, null if not found.
 * @throws IOException
 */
private HistoryFileInfo getJobFileInfo(List<FileStatus> fileStatusList,
    JobId jobId) throws IOException {
  for (FileStatus fs : fileStatusList) {
    JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
        .getName());
    if (jobIndexInfo.getJobId().equals(jobId)) {
      String confFileName = JobHistoryUtils
          .getIntermediateConfFileName(jobIndexInfo.getJobId());
      String summaryFileName = JobHistoryUtils
          .getIntermediateSummaryFileName(jobIndexInfo.getJobId());
      HistoryFileInfo fileInfo = new HistoryFileInfo(fs.getPath(), new Path(
          fs.getPath().getParent(), confFileName), new Path(fs.getPath()
          .getParent(), summaryFileName), jobIndexInfo, true);
      return fileInfo;
    }
  }
  return null;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 29, Source file: HistoryFileManager.java

Example 5: clean

import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils; // import the package/class the method depends on
/**
 * Clean up older history files.
 * 
 * @throws IOException
 *           on any error trying to remove the entries.
 */
@SuppressWarnings("unchecked")
void clean() throws IOException {
  long cutoff = System.currentTimeMillis() - maxHistoryAge;
  boolean halted = false;
  List<FileStatus> serialDirList = getHistoryDirsForCleaning(cutoff);
  // Sort in ascending order. Relies on YYYY/MM/DD/Serial
  Collections.sort(serialDirList);
  for (FileStatus serialDir : serialDirList) {
    List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(
        serialDir.getPath(), doneDirFc);
    for (FileStatus historyFile : historyFileList) {
      JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(historyFile
          .getPath().getName());
      long effectiveTimestamp = getEffectiveTimestamp(
          jobIndexInfo.getFinishTime(), historyFile);
      if (effectiveTimestamp <= cutoff) {
        HistoryFileInfo fileInfo = this.jobListCache.get(jobIndexInfo
            .getJobId());
        if (fileInfo == null) {
          String confFileName = JobHistoryUtils
              .getIntermediateConfFileName(jobIndexInfo.getJobId());

          fileInfo = createHistoryFileInfo(historyFile.getPath(), new Path(
              historyFile.getPath().getParent(), confFileName), null,
              jobIndexInfo, true);
        }
        deleteJobFromDone(fileInfo);
      } else {
        halted = true;
        break;
      }
    }
    if (!halted) {
      deleteDir(serialDir);
      removeDirectoryFromSerialNumberIndex(serialDir.getPath());
      existingDoneSubdirs.remove(serialDir.getPath());
    } else {
      break; // Don't scan any more directories.
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 48, Source file: HistoryFileManager.java

Example 6: clean

import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils; // import the package/class the method depends on
/**
 * Clean up older history files.
 * 
 * @throws IOException
 *           on any error trying to remove the entries.
 */
@SuppressWarnings("unchecked")
void clean() throws IOException {
  long cutoff = System.currentTimeMillis() - maxHistoryAge;
  boolean halted = false;
  List<FileStatus> serialDirList = getHistoryDirsForCleaning(cutoff);
  // Sort in ascending order. Relies on YYYY/MM/DD/Serial
  Collections.sort(serialDirList);
  for (FileStatus serialDir : serialDirList) {
    List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(
        serialDir.getPath(), doneDirFc);
    for (FileStatus historyFile : historyFileList) {
      JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(historyFile
          .getPath().getName());
      long effectiveTimestamp = getEffectiveTimestamp(
          jobIndexInfo.getFinishTime(), historyFile);
      if (effectiveTimestamp <= cutoff) {
        HistoryFileInfo fileInfo = this.jobListCache.get(jobIndexInfo
            .getJobId());
        if (fileInfo == null) {
          String confFileName = JobHistoryUtils
              .getIntermediateConfFileName(jobIndexInfo.getJobId());

          fileInfo = new HistoryFileInfo(historyFile.getPath(), new Path(
              historyFile.getPath().getParent(), confFileName), null,
              jobIndexInfo, true);
        }
        deleteJobFromDone(fileInfo);
      } else {
        halted = true;
        break;
      }
    }
    if (!halted) {
      deleteDir(serialDir);
      removeDirectoryFromSerialNumberIndex(serialDir.getPath());
      existingDoneSubdirs.remove(serialDir.getPath());
    } else {
      break; // Don't scan any more directories.
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 48, Source file: HistoryFileManager.java

Example 7: clean

import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils; // import the package/class the method depends on
/**
 * Clean up older history files.
 * 
 * @throws IOException
 *           on any error trying to remove the entries.
 */
@SuppressWarnings("unchecked")
void clean() throws IOException {
  // TODO this should be replaced by something that knows about the directory
  // structure and will put less of a load on HDFS.
  long cutoff = System.currentTimeMillis() - maxHistoryAge;
  boolean halted = false;
  // TODO Delete YYYY/MM/DD directories.
  List<FileStatus> serialDirList = findTimestampedDirectories();
  // Sort in ascending order. Relies on YYYY/MM/DD/Serial
  Collections.sort(serialDirList);
  for (FileStatus serialDir : serialDirList) {
    List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(
        serialDir.getPath(), doneDirFc);
    for (FileStatus historyFile : historyFileList) {
      JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(historyFile
          .getPath().getName());
      long effectiveTimestamp = getEffectiveTimestamp(
          jobIndexInfo.getFinishTime(), historyFile);
      if (effectiveTimestamp <= cutoff) {
        HistoryFileInfo fileInfo = this.jobListCache.get(jobIndexInfo
            .getJobId());
        if (fileInfo == null) {
          String confFileName = JobHistoryUtils
              .getIntermediateConfFileName(jobIndexInfo.getJobId());

          fileInfo = new HistoryFileInfo(historyFile.getPath(), new Path(
              historyFile.getPath().getParent(), confFileName), null,
              jobIndexInfo, true);
        }
        deleteJobFromDone(fileInfo);
      } else {
        halted = true;
        break;
      }
    }
    if (!halted) {
      doneDirFc.delete(doneDirFc.makeQualified(serialDir.getPath()), true);
      removeDirectoryFromSerialNumberIndex(serialDir.getPath());
      existingDoneSubdirs.remove(serialDir.getPath());
    } else {
      break; // Don't scan any more directories.
    }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 51, Source file: HistoryFileManager.java


Note: The org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils.getIndexInfo method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.