

Java RaidDFSUtil.getCorruptFiles Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.RaidDFSUtil.getCorruptFiles, drawn from open-source projects. If you are wondering what RaidDFSUtil.getCorruptFiles does or how to use it, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.RaidDFSUtil.


Six code examples of RaidDFSUtil.getCorruptFiles are presented below, sorted by popularity by default.
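
Before diving into the examples, here is a minimal sketch of the basic call pattern, pieced together from the snippets below. The wrapper class CorruptFileLister and the use of the root path "/" to obtain the file system handle are illustrative assumptions, not part of any original example.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.RaidDFSUtil;

public class CorruptFileLister {
  /**
   * Prints every file the namenode currently reports as corrupt.
   * Assumes the default file system configured in conf is HDFS,
   * so the cast to DistributedFileSystem succeeds.
   */
  public static void listCorruptFiles(Configuration conf) throws IOException {
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf);
    String[] corrupt = RaidDFSUtil.getCorruptFiles(dfs);
    for (String file : corrupt) {
      System.out.println(file);
    }
  }
}

As Examples 1 and 3 show, callers typically post-filter this raw list, for instance to skip files that are already being fixed or that sit in the trash.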

Example 1: getCorruptFiles

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class the method depends on
/**
 * gets a list of corrupt files from the name node
 * and filters out files that are currently being fixed or 
 * that were recently fixed
 */
private List<Path> getCorruptFiles() throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem) 
    (new Path("/")).getFileSystem(getConf());

  String[] files = RaidDFSUtil.getCorruptFiles(dfs);
  List<Path> corruptFiles = new LinkedList<Path>();

  for (String f: files) {
    Path p = new Path(f);
    // filter out files that are being fixed or that were recently fixed
    if (!fileIndex.containsKey(p.toString())) {
      corruptFiles.add(p);
    }
  }
  RaidUtils.filterTrash(getConf(), corruptFiles);

  return corruptFiles;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 24, Source: DistBlockFixer.java

Example 2: waitUntilCorruptFileCount

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class the method depends on
/**
 * sleeps for up to 20s until the number of corrupt files 
 * in the file system is equal to the number specified
 */
private void waitUntilCorruptFileCount(DistributedFileSystem dfs,
                                       int corruptFiles)
  throws IOException {
  long waitStart = System.currentTimeMillis();
  while (RaidDFSUtil.getCorruptFiles(dfs).length != corruptFiles) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ignore) {
      // ignored: the loop condition re-checks the corrupt file count
    }

    if (System.currentTimeMillis() > waitStart + 20000L) {
      break;
    }
  }
  
  int corruptFilesFound = RaidDFSUtil.getCorruptFiles(dfs).length;
  if (corruptFilesFound != corruptFiles) {
    throw new IOException("expected " + corruptFiles + 
                          " corrupt files but got " +
                          corruptFilesFound);
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 28, Source: TestRaidShellFsck.java

Example 3: getCorruptFiles

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class the method depends on
/**
 * @return A list of corrupt files as obtained from the namenode
 */
List<Path> getCorruptFiles() throws IOException {
  DistributedFileSystem dfs = helper.getDFS(new Path("/"));

  String[] files = RaidDFSUtil.getCorruptFiles(dfs);
  List<Path> corruptFiles = new LinkedList<Path>();
  for (String f: files) {
    Path p = new Path(f);
    if (!history.containsKey(p.toString())) {
      corruptFiles.add(p);
    }
  }
  RaidUtils.filterTrash(getConf(), corruptFiles);
  return corruptFiles;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 18, Source: LocalBlockFixer.java

Example 4: fsck

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class the method depends on
/**
 * checks the raided file system, prints a list of corrupt files to
 * System.out and returns the number of corrupt files
 */
public int fsck(final String path) throws IOException {

  FileSystem fs = (new Path(path)).getFileSystem(conf);

  // if we got a raid fs, get the underlying fs
  if (fs instanceof DistributedRaidFileSystem) {
    fs = ((DistributedRaidFileSystem) fs).getFileSystem();
  }

  // check that we have a distributed fs
  if (!(fs instanceof DistributedFileSystem)) {
    throw new IOException("expected DistributedFileSystem but got " +
              fs.getClass().getName());
  }
  final DistributedFileSystem dfs = (DistributedFileSystem) fs;

  // get conf settings
  String xorPrefix = RaidNode.xorDestinationPath(conf).toUri().getPath();
  String rsPrefix = RaidNode.rsDestinationPath(conf).toUri().getPath();
  if (!xorPrefix.endsWith("/")) {
    xorPrefix = xorPrefix + "/";
  }
  if (!rsPrefix.endsWith("/")) {
    rsPrefix = rsPrefix + "/";
  }
  LOG.debug("prefixes: " + xorPrefix + ", " + rsPrefix);

  // get a list of corrupted files (not considering parity blocks just yet)
  // from the name node
  // these are the only files we need to consider:
  // if a file has no corrupted data blocks, it is OK even if some
  // of its parity blocks are corrupted, so no further checking is
  // necessary
  final String[] files = RaidDFSUtil.getCorruptFiles(dfs);
  final List<Path> corruptFileCandidates = new LinkedList<Path>();
  for (final String f: files) {
    final Path p = new Path(f);
    // if this file is a parity file
    // or if it does not start with the specified path,
    // ignore it
    if (!p.toString().startsWith(xorPrefix) &&
        !p.toString().startsWith(rsPrefix) &&
        p.toString().startsWith(path)) {
      corruptFileCandidates.add(p);
    }
  }
  // filter files marked for deletion
  RaidUtils.filterTrash(conf, corruptFileCandidates);

  int numberOfCorruptFiles = 0;

  for (final Path corruptFileCandidate: corruptFileCandidates) {
    if (isFileCorrupt(dfs, corruptFileCandidate)) {
      System.out.println(corruptFileCandidate.toString());
      numberOfCorruptFiles++;
    }
  }

  return numberOfCorruptFiles;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 65, Source: RaidShell.java

Example 5: implBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class the method depends on
/**
 * Creates a file with three stripes, corrupts one block in each stripe,
 * and waits for the file to be fixed.
 */
protected void implBlockFix(boolean local) throws Exception {
  LOG.info("Test testBlockFix started.");
  long blockSize = 8192L;
  int stripeLength = 3;
  mySetup(stripeLength, -1); // never har
  Path file1 = new Path("/user/dhruba/raidtest/file1");
  Path destPath = new Path("/destraid/user/dhruba/raidtest");
  long crc1 = TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1,
                                                        1, 7, blockSize);
  long file1Len = fileSys.getFileStatus(file1).getLen();
  LOG.info("Test testBlockFix created test files");

  // create an instance of the RaidNode
  Configuration localConf = new Configuration(conf);
  localConf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
  localConf.setInt("raid.blockfix.interval", 1000);
  if (local) {
    localConf.set("raid.blockfix.classname",
                  "org.apache.hadoop.raid.LocalBlockFixer");
  } else {
    localConf.set("raid.blockfix.classname",
                  "org.apache.hadoop.raid.DistBlockFixer");
  }
  localConf.setLong("raid.blockfix.filespertask", 2L);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForFileRaided(LOG, fileSys, file1, destPath);
    cnode.stop(); cnode.join();
    
    FileStatus srcStat = fileSys.getFileStatus(file1);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, file1.toUri().getPath(), 0, srcStat.getLen());

    String[] corruptFiles = RaidDFSUtil.getCorruptFiles(dfs);
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockFixer.filesFixed());
    
    // Corrupt one block in each of the three stripes. We can fix them.
    int[] corruptBlockIdxs = new int[]{0, 4, 6};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock());
    reportCorruptBlocks(dfs, file1, corruptBlockIdxs, blockSize);
    
    corruptFiles = RaidDFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted", 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], file1.toUri().getPath());
    assertEquals("wrong number of corrupt blocks", 3,
      RaidDFSUtil.corruptBlocksInFile(dfs, file1.toUri().getPath(), 0,
        srcStat.getLen()).size());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockFixer.filesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test testBlockFix waiting for files to be fixed.");
      Thread.sleep(1000);
    }
    assertEquals("file not fixed", 1, cnode.blockFixer.filesFixed());
    
    dfs = getDFS(conf, dfs);
    assertTrue("file not fixed",
               TestRaidDfs.validateFile(dfs, file1, file1Len, crc1));

  } catch (Exception e) {
    LOG.info("Test testBlockFix Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test testBlockFix completed.");
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 81, Source: TestBlockFixer.java

Example 6: testDecoder

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class the method depends on
public void testDecoder() throws Exception {
  mySetup();
  int stripeSize = 10;
  int paritySize = 4;
  long blockSize = 8192;
  Path file1 = new Path("/user/raidtest/file1");
  Path recoveredFile1 = new Path("/user/raidtest/file1.recovered");
  Path parityFile1 = new Path("/rsraid/user/raidtest/file1");
  long crc1 = TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1,
                                                        1, 25, blockSize);
  FileStatus file1Stat = fileSys.getFileStatus(file1);

  conf.setInt("raid.rsdecoder.bufsize", 512);
  conf.setInt("raid.rsencoder.bufsize", 512);

  try {
    // First encode the file.
    ReedSolomonEncoder encoder = new ReedSolomonEncoder(
      conf, stripeSize, paritySize);
    short parityRepl = 1;
    encoder.encodeFile(fileSys, file1, fileSys, parityFile1, parityRepl,
      Reporter.NULL);

    // Ensure there are no corrupt files yet.
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    String[] corruptFiles = RaidDFSUtil.getCorruptFiles(dfs);
    assertEquals(0, corruptFiles.length);

    // Now corrupt the file.
    long corruptOffset = blockSize * 5;
    FileStatus srcStat = fileSys.getFileStatus(file1);
    LocatedBlocks locations = RaidDFSUtil.getBlockLocations(dfs,
        file1.toUri().getPath(), 0, srcStat.getLen());
    corruptBlock(locations.get(5).getBlock());
    corruptBlock(locations.get(6).getBlock());
    TestBlockFixer.reportCorruptBlocks(dfs, file1, new int[]{5, 6},
        srcStat.getBlockSize());

    // Ensure file is corrupted.
    corruptFiles = RaidDFSUtil.getCorruptFiles(dfs);
    assertEquals(1, corruptFiles.length);
    assertEquals(file1.toString(), corruptFiles[0]);

    // Fix the file.
    ReedSolomonDecoder decoder = new ReedSolomonDecoder(
      conf, stripeSize, paritySize);
    decoder.decodeFile(fileSys, file1, fileSys, parityFile1,
              corruptOffset, recoveredFile1);
    assertTrue(TestRaidDfs.validateFile(
                  fileSys, recoveredFile1, file1Stat.getLen(), crc1));
  } finally {
    myTearDown();
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 55, Source: TestReedSolomonDecoder.java


Note: The org.apache.hadoop.hdfs.RaidDFSUtil.getCorruptFiles examples in this article were collected from open-source projects hosted on GitHub and similar code/documentation platforms. The code snippets remain the copyright of their original authors; consult the corresponding projects' licenses before redistributing or reusing them.