

Java IOUtils.listDirectory Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.IOUtils.listDirectory. If you are wondering how to use IOUtils.listDirectory in practice, the hand-picked code examples below may help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.io.IOUtils.


The sections below show 4 code examples of the IOUtils.listDirectory method, sorted by popularity by default.
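
Before the examples, here is a minimal, self-contained sketch of the API as it is used below: IOUtils.listDirectory takes a java.io.File and a java.io.FilenameFilter and returns the matching entry names as a List<String>, throwing IOException on failure. The directory path and the ".log" suffix here are made up purely for illustration.

import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.io.IOUtils;

public class ListDirectoryDemo {
  public static void main(String[] args) throws IOException {
    // Hypothetical directory; replace with a path that exists on your system.
    File dir = new File("/tmp/demo-dir");

    // Keep only the entries whose names end with ".log".
    List<String> names = IOUtils.listDirectory(dir, new FilenameFilter() {
      @Override
      public boolean accept(File parent, String name) {
        return name.endsWith(".log");
      }
    });

    for (String name : names) {
      System.out.println(name);
    }
  }
}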

Example 1: doPreUpgrade

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Perform any steps that must succeed across all storage dirs/JournalManagers
 * involved in an upgrade before proceeding onto the actual upgrade stage. If
 * a call to any JM's or local storage dir's doPreUpgrade method fails, then
 * doUpgrade will not be called for any JM. The existing current dir is
 * renamed to previous.tmp, and then a new, empty current dir is created.
 *
 * @param conf configuration for creating {@link EditLogFileOutputStream}
 * @param sd the storage directory on which to perform the pre-upgrade procedure
 * @throws IOException in the event of error
 */
static void doPreUpgrade(Configuration conf, StorageDirectory sd)
    throws IOException {
  LOG.info("Starting upgrade of storage directory " + sd.getRoot());

  // rename current to tmp
  renameCurToTmp(sd);

  final File curDir = sd.getCurrentDir();
  final File tmpDir = sd.getPreviousTmp();
  List<String> fileNameList = IOUtils.listDirectory(tmpDir, new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return dir.equals(tmpDir)
          && name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
    }
  });

  for (String s : fileNameList) {
    File prevFile = new File(tmpDir, s);
    File newFile = new File(curDir, prevFile.getName());
    Files.createLink(newFile.toPath(), prevFile.toPath());
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 35, Source file: NNUpgradeUtil.java
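
The helper renameCurToTmp called above is not shown on this page. According to the Javadoc, it renames the existing current directory to previous.tmp and then creates a new, empty current directory. The following is only a rough sketch of that step under those assumptions; the real helper in NNUpgradeUtil performs additional validation and uses Hadoop's own rename utilities.

// Illustrative sketch only -- not the actual Hadoop implementation.
static void renameCurToTmp(StorageDirectory sd) throws IOException {
  final File curDir = sd.getCurrentDir();
  final File tmpDir = sd.getPreviousTmp();

  // Rename current -> previous.tmp.
  if (!curDir.renameTo(tmpDir)) {
    throw new IOException("Could not rename " + curDir + " to " + tmpDir);
  }

  // Recreate an empty current directory for the upgraded layout.
  if (!curDir.mkdir()) {
    throw new IOException("Could not create directory " + curDir);
  }
}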

Example 2: getNextSubDir

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
/**
 * Get the next subdirectory within the block pool slice.
 *
 * @return         The next subdirectory within the block pool slice, or
 *                   null if there are no more.
 */
private String getNextSubDir(String prev, File dir)
      throws IOException {
  List<String> children =
      IOUtils.listDirectory(dir, SubdirFilter.INSTANCE);
  cache = null;
  cacheMs = 0;
  if (children.size() == 0) {
    LOG.trace("getNextSubDir({}, {}): no subdirectories found in {}",
        storageID, bpid, dir.getAbsolutePath());
    return null;
  }
  Collections.sort(children);
  String nextSubDir = nextSorted(children, prev);
  if (nextSubDir == null) {
    LOG.trace("getNextSubDir({}, {}): no more subdirectories found in {}",
        storageID, bpid, dir.getAbsolutePath());
  } else {
    LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} " +
        "within {}", storageID, bpid, nextSubDir, dir.getAbsolutePath());
  }
  return nextSubDir;
}
 
Developer: naver, Project: hadoop, Lines of code: 29, Source file: FsVolumeImpl.java
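
Two helpers referenced in this example are not shown here: SubdirFilter.INSTANCE, a FilenameFilter that accepts the block pool's subdirectory names, and nextSorted, which appears to return the first entry of the sorted list that comes after prev (or the first entry when prev is null). A purely illustrative sketch of a helper with that behavior:

// Illustrative sketch: 'sorted' must already be in ascending order.
// Returns the first entry strictly greater than 'prev', the first entry
// if 'prev' is null, or null if nothing follows 'prev'.
private static String nextSorted(List<String> sorted, String prev) {
  for (String entry : sorted) {
    if (prev == null || entry.compareTo(prev) > 0) {
      return entry;
    }
  }
  return null;
}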

Example 3: getSubdirEntries

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
private List<String> getSubdirEntries() throws IOException {
  if (state.curFinalizedSubDir == null) {
    return null; // There are no entries in the null subdir.
  }
  long now = Time.monotonicNow();
  if (cache != null) {
    long delta = now - cacheMs;
    if (delta < maxStalenessMs) {
      return cache;
    } else {
      LOG.trace("getSubdirEntries({}, {}): purging entries cache for {} " +
        "after {} ms.", storageID, bpid, state.curFinalizedSubDir, delta);
      cache = null;
    }
  }
  File dir = Paths.get(bpidDir.getAbsolutePath(), "current", "finalized",
                state.curFinalizedDir, state.curFinalizedSubDir).toFile();
  List<String> entries =
      IOUtils.listDirectory(dir, BlockFileFilter.INSTANCE);
  if (entries.size() == 0) {
    entries = null;
  } else {
    Collections.sort(entries);
  }
  if (entries == null) {
    LOG.trace("getSubdirEntries({}, {}): no entries found in {}",
        storageID, bpid, dir.getAbsolutePath());
  } else {
    LOG.trace("getSubdirEntries({}, {}): listed {} entries in {}", 
        storageID, bpid, entries.size(), dir.getAbsolutePath());
  }
  cache = entries;
  cacheMs = now;
  return cache;
}
 
Developer: naver, Project: hadoop, Lines of code: 36, Source file: FsVolumeImpl.java
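
BlockFileFilter.INSTANCE is another filter that is not reproduced on this page. Assuming the usual HDFS naming convention, where block data files are named blk_<id> and their checksum companions end in .meta, a singleton filter of that shape could look like the sketch below; the real filter in FsVolumeImpl may differ.

// Illustrative sketch of a singleton FilenameFilter; the "blk_" prefix and
// ".meta" suffix are assumptions based on common HDFS naming, not taken
// from the FsVolumeImpl source shown here.
enum DemoBlockFileFilter implements java.io.FilenameFilter {
  INSTANCE;

  @Override
  public boolean accept(java.io.File dir, String name) {
    return name.startsWith("blk_") && !name.endsWith(".meta");
  }
}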

Example 4: testPreserveEditLogs

import org.apache.hadoop.io.IOUtils; // import the package/class this method depends on
@Test
public void testPreserveEditLogs() throws Exception {
  conf = new HdfsConfiguration();
  conf = UpgradeUtilities.initializeStorageStateConf(1, conf);
  String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);

  log("Normal NameNode upgrade", 1);
  File[] created =
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
  for (final File createdDir : created) {
    List<String> fileNameList =
        IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
    for (String fileName : fileNameList) {
      String tmpFileName = fileName + ".tmp";
      File existingFile = new File(createdDir, fileName);
      File tmpFile = new File(createdDir, tmpFileName);
      Files.move(existingFile.toPath(), tmpFile.toPath());
      File newFile = new File(createdDir, fileName);
      Preconditions.checkState(newFile.createNewFile(),
          "Cannot create new edits log file in " + createdDir);
      EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
          HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID,
          false);
      EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
          (int)tmpFile.length());
      out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
      FSEditLogOp logOp = in.readOp();
      while (logOp != null) {
        out.write(logOp);
        logOp = in.readOp();
      }
      out.setReadyToFlush();
      out.flushAndSync(true);
      out.close();
      Files.delete(tmpFile.toPath());
    }
  }

  cluster = createCluster();

  DFSInotifyEventInputStream ieis =
      cluster.getFileSystem().getInotifyEventStream(0);
  EventBatch batch = ieis.poll();
  Event[] events = batch.getEvents();
  assertTrue("Should be able to get transactions before the upgrade.",
      events.length > 0);
  assertEquals(events[0].getEventType(), Event.EventType.CREATE);
  assertEquals(((CreateEvent) events[0]).getPath(), "/TestUpgrade");
  cluster.shutdown();
  UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
 
Developer: naver, Project: hadoop, Lines of code: 53, Source file: TestDFSUpgrade.java
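
EditLogsFilter.INSTANCE is likewise not shown. Judging from Example 1, which filters on names starting with NNStorage.NameNodeFile.EDITS.getName(), an equivalent singleton filter might look like the following sketch; the actual filter in TestDFSUpgrade is not reproduced here.

// Illustrative sketch modeled on the inline filter from Example 1; the real
// EditLogsFilter in TestDFSUpgrade is not shown on this page.
enum DemoEditLogsFilter implements java.io.FilenameFilter {
  INSTANCE;

  @Override
  public boolean accept(java.io.File dir, String name) {
    return name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
  }
}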


Note: The org.apache.hadoop.io.IOUtils.listDirectory examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors; copyright remains with those authors, and any distribution or use of the code should follow the corresponding project's license. Please do not republish without permission.