

Java RaidDFSUtil.reportCorruptBlocks Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.RaidDFSUtil.reportCorruptBlocks, drawn from open-source projects. If you are wondering what RaidDFSUtil.reportCorruptBlocks does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.RaidDFSUtil.


Nine code examples of RaidDFSUtil.reportCorruptBlocks are shown below, ordered by popularity.
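Before the examples, here is a minimal sketch of how the method is typically invoked. The parameter list is inferred from the call sites below (an HDFS file system, the file path, the zero-based indexes of the corrupted blocks, and the file's block size); the wrapper class and helper name are illustrative, not part of the Raid API.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.RaidDFSUtil;

public class ReportCorruptBlocksSketch {
  /**
   * Report the given zero-based block indexes of a file as corrupt to the
   * NameNode, so that DFSUtil.getCorruptFiles(dfs) lists the file.
   * Sketch only: the signature is inferred from the examples in this article.
   */
  static void reportCorrupt(FileSystem fileSys, Path file,
                            int[] corruptBlockIdxs, long blockSize)
      throws Exception {
    // The examples assume an HDFS-backed FileSystem and cast accordingly.
    DistributedFileSystem dfs = (DistributedFileSystem) fileSys;
    RaidDFSUtil.reportCorruptBlocks(dfs, file, corruptBlockIdxs, blockSize);
  }
}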

Example 1: implParityBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
/**
 * Corrupt a parity file and wait for it to get fixed.
 */
private void implParityBlockFix(String testName, boolean local)
  throws Exception {
  LOG.info("Test " + testName + " started.");
  int stripeLength = 3;
  mySetup(stripeLength); 
  long[] crcs = new long[3];
  int[] seeds = new int[3];
  Path dirPath = new Path("/user/dhruba/raidtest");
  Path[] files = TestRaidDfs.createTestFiles(dirPath,
      fileSizes, blockSizes, crcs, seeds, fileSys, (short)1);
  Path destPath = new Path("/destraid/user/dhruba");
  Path parityFile = new Path("/destraid/user/dhruba/raidtest");
  LOG.info("Test " + testName + " created test files");
  Configuration localConf = this.getRaidNodeConfig(conf, local);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForDirRaided(LOG, fileSys, dirPath, destPath);
    cnode.stop(); cnode.join();

    long parityCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    FileStatus parityStat = fileSys.getFileStatus(parityFile);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockIntegrityMonitor.getNumFilesFixed());

    // Corrupt parity blocks for different stripes.
    int[] corruptBlockIdxs = new int[]{0, 1, 2};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock(), dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(dfs, parityFile, corruptBlockIdxs,
        2*blockSize);

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted",
                 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], parityFile.toUri().getPath());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test " + testName + " waiting for files to be fixed.");
      Thread.sleep(3000);
    }
    TestBlockFixer.verifyMetrics(fileSys, cnode, local, 1L, 
        corruptBlockIdxs.length);

    long checkCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    assertEquals("file not fixed",
                 parityCRC, checkCRC);

  } catch (Exception e) {
    LOG.info("Test " + testName + " Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test " + testName + " completed.");
}
 
Author: rhli, Project: hadoop-EAR, Lines: 73, Source: TestDirectoryBlockFixer.java
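Examples 1, 4, 6, and 7 all wait for the repair with the same polling loop over blockIntegrityMonitor.getNumFilesFixed(). A hedged helper distilling that pattern is sketched below; it assumes the same access to RaidNode's blockIntegrityMonitor field that these tests rely on, and the class and method names are illustrative only.

import org.apache.hadoop.raid.RaidNode;

public class BlockFixWait {
  /**
   * Poll until at least `expected` files are fixed or `timeoutMs` elapses.
   * Distilled from the wait loops in the surrounding examples; returns
   * whether the target was reached. Sketch only, not part of the Raid API.
   */
  static boolean waitForFilesFixed(RaidNode cnode, long expected, long timeoutMs)
      throws InterruptedException {
    long start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < expected &&
           System.currentTimeMillis() - start < timeoutMs) {
      Thread.sleep(1000); // the examples sleep between 1 and 10 seconds
    }
    return cnode.blockIntegrityMonitor.getNumFilesFixed() >= expected;
  }
}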

Example 2: testMultiplePriorities

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testMultiplePriorities() throws Exception {
  LOG.info("Test testMultiplePriorities started.");
  Path srcFile = new Path("/home/test/file1");
  int repl = 1;
  int numBlocks = 8;
  long blockSize = 16384;
  int stripeLength = 3;
  Path destPath = new Path("/destraidrs");
  mySetup(stripeLength, -1); // never har
  Codec codec = Codec.getCodec("rs");
  LOG.info("Starting testMultiplePriorities");
  try {
    // Create test file and raid it.
    TestRaidDfs.createTestFilePartialLastBlock(
      fileSys, srcFile, repl, numBlocks, blockSize);
    FileStatus stat = fileSys.getFileStatus(srcFile);
    RaidNode.doRaid(conf, stat,
      destPath, codec, new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
      false, repl, repl);

    // Corrupt first block of file.
    int blockIdxToCorrupt = 1;
    LOG.info("Corrupt block " + blockIdxToCorrupt + " of file " + srcFile);
    LocatedBlocks locations = getBlockLocations(srcFile, stat.getLen());
    corruptBlock(locations.get(blockIdxToCorrupt).getBlock(),
        dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(fileSys, srcFile, new int[]{1}, blockSize);

    // Create Block Fixer and fix.
    FakeDistBlockIntegrityMonitor distBlockFixer = new FakeDistBlockIntegrityMonitor(conf);
    assertEquals(0, distBlockFixer.submittedJobs.size());

    // waiting for one job to submit
    long startTime = System.currentTimeMillis();
    while (System.currentTimeMillis() - startTime < 120000 &&
           distBlockFixer.submittedJobs.size() == 0) { 
      distBlockFixer.getCorruptionMonitor().checkAndReconstructBlocks();
      LOG.info("Waiting for jobs to submit");
      Thread.sleep(10000);
    }
    int submittedJob = distBlockFixer.submittedJobs.size();
    LOG.info("Already Submitted " + submittedJob + " jobs");
    assertTrue("Should submit more than 1 jobs", submittedJob >= 1);

    // Corrupt one more block.
    blockIdxToCorrupt = 4;
    LOG.info("Corrupt block " + blockIdxToCorrupt + " of file " + srcFile);
    locations = getBlockLocations(srcFile, stat.getLen());
    corruptBlock(locations.get(blockIdxToCorrupt).getBlock(),
        dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(fileSys, srcFile, new int[]{4}, blockSize);

    // A new job should be submitted since two blocks are corrupt.
    startTime = System.currentTimeMillis();
    while (System.currentTimeMillis() - startTime < 120000 &&
           distBlockFixer.submittedJobs.size() == submittedJob) { 
      distBlockFixer.getCorruptionMonitor().checkAndReconstructBlocks();
      LOG.info("Waiting for more jobs to submit");
      Thread.sleep(10000);
    }
    LOG.info("Already Submitted " + distBlockFixer.submittedJobs.size()  + " jobs");
    assertTrue("Should submit more than 1 jobs",
        distBlockFixer.submittedJobs.size() - submittedJob >= 1);
  } finally {
    myTearDown();
  }
}
 
Author: rhli, Project: hadoop-EAR, Lines: 68, Source: TestBlockFixer.java

Example 3: verifyDecoder

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void verifyDecoder(String code, int parallelism) throws Exception {
  Codec codec = Codec.getCodec(code);
  conf.setInt("raid.encoder.parallelism", parallelism);
  ConfigBuilder cb = new ConfigBuilder(CONFIG_FILE);
  cb.addPolicy("RaidTest1", "/user/dikang/raidtest/file" + code + parallelism,
      1, 1, code);
  cb.persist();
  Path srcPath = new Path("/user/dikang/raidtest/file" + code + parallelism +
      "/file1");
  long blockSize = 8192 * 1024L;
  
  long crc = TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
      1, 7, blockSize);
  doRaid(srcPath, codec);
  FileStatus srcStat = fileSys.getFileStatus(srcPath);
  ParityFilePair pair = ParityFilePair.getParityFile(codec, srcStat, conf);
  
  FileStatus file1Stat = fileSys.getFileStatus(srcPath);
  long length = file1Stat.getLen();
  LocatedBlocks file1Loc =
      RaidDFSUtil.getBlockLocations((DistributedFileSystem)fileSys, 
          srcPath.toUri().getPath(),
          0, length);
   
  // corrupt file
    
  int[] corruptBlockIdxs = new int[] {5};
  long errorOffset = 5 * blockSize;
  for (int idx: corruptBlockIdxs) {
    TestBlockFixer.corruptBlock(file1Loc.get(idx).getBlock(), dfsCluster);
  }
    
  RaidDFSUtil.reportCorruptBlocks((DistributedFileSystem)fileSys, srcPath,
      corruptBlockIdxs, blockSize);
  
  Decoder decoder = new Decoder(conf, codec);
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  decoder.codec.simulateBlockFix = true;
  CRC32 oldCRC = decoder.fixErasedBlock(fileSys, srcStat, fileSys, 
      pair.getPath(), true, blockSize, errorOffset, blockSize, false, 
      out, null, null, false);
  
  decoder.codec.simulateBlockFix = false;
  out = new ByteArrayOutputStream();
  decoder.fixErasedBlock(fileSys, srcStat, fileSys, 
      pair.getPath(), true, blockSize, errorOffset, blockSize, false, 
      out, null, null, false);
  
  // calculate the new crc
  CRC32 newCRC = new CRC32();
  byte[] constructedBytes = out.toByteArray();
  newCRC.update(constructedBytes);
  
  assertEquals(oldCRC.getValue(), newCRC.getValue());
}
 
Author: rhli, Project: hadoop-EAR, Lines: 56, Source: TestDecoder.java

Example 4: testParityHarBadBlockFixer

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testParityHarBadBlockFixer() throws Exception {
  LOG.info("Test testParityHarBlockFix started.");
  long blockSize = 8192L;
  int stripeLength = 3;
  mySetup(stripeLength, -1, "org.apache.hadoop.raid.BadXORCode",
      "org.apache.hadoop.raid.BadReedSolomonCode", "rs", true); 
  Path file1 = new Path("/user/dhruba/raidtest/file1");
  // Parity file will have 7 blocks.
  long crc = TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1,
                                             1, 20, blockSize);
  LOG.info("Created test files");
  
  // create an instance of the RaidNode
  Configuration localConf = new Configuration(conf);
  localConf.setInt(RaidNode.RAID_PARITY_HAR_THRESHOLD_DAYS_KEY, 0);
  localConf.set("raid.blockfix.classname",
                "org.apache.hadoop.raid.DistBlockIntegrityMonitor");
  localConf.setLong("raid.blockfix.filespertask", 2L);
  
  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    Path harDirectory =
      new Path("/destraidrs/user/dhruba/raidtest/raidtest" +
               RaidNode.HAR_SUFFIX);
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < 1000 * 120) {
      if (fileSys.exists(harDirectory)) {
        break;
      }
      LOG.info("Waiting for har");
      Thread.sleep(1000);
    }
    assertEquals(true, fileSys.exists(harDirectory));
    cnode.stop(); cnode.join();
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    // Corrupt source blocks
    FileStatus stat = fileSys.getFileStatus(file1);
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, file1.toUri().getPath(), 0, stat.getLen());
    int[] corruptBlockIdxs = new int[]{0};
    for (int idx: corruptBlockIdxs) {
      TestBlockFixer.corruptBlock(locs.get(idx).getBlock(),
          dfsCluster);
    }
    RaidDFSUtil.reportCorruptBlocks(dfs, file1, corruptBlockIdxs,
        stat.getBlockSize());

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted", 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], file1.toUri().getPath());

    cnode = RaidNode.createRaidNode(null, localConf);
    start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Waiting for files to be fixed.");
      Thread.sleep(1000);
    }

    long checkCRC = RaidDFSUtil.getCRC(fileSys, file1);
    assertEquals("file not fixed", crc, checkCRC);
    // Verify the counters are right
    long expectedNumFailures = corruptBlockIdxs.length;
    assertEquals(expectedNumFailures,
        cnode.blockIntegrityMonitor.getNumBlockFixSimulationFailures());
    assertEquals(0,
        cnode.blockIntegrityMonitor.getNumBlockFixSimulationSuccess());
  } catch (Exception e) {
    LOG.info("Exception ", e);
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test testParityHarBlockFix completed.");
}
 
Author: rhli, Project: hadoop-EAR, Lines: 79, Source: TestSimulationParityBlockFixer.java

Example 5: doThePartialTest

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
private boolean doThePartialTest(Codec codec,
                              int blockNum,
                              int[] corruptBlockIdxs) throws Exception {
  long blockSize = 8192 * 1024L;
  int bufferSize = 4192 * 1024;

  Path srcPath = new Path("/user/dikang/raidtest/file" + 
                          UUID.randomUUID().toString());

  long crc = TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
      1, blockNum, blockSize);
  
  DistributedRaidFileSystem raidFs = getRaidFS();
  assertTrue(raidFs.exists(srcPath));
  
  // generate the parity files.
  doRaid(srcPath, codec);

  FileStatus file1Stat = fileSys.getFileStatus(srcPath);
  long length = file1Stat.getLen();
  LocatedBlocks file1Loc =
      RaidDFSUtil.getBlockLocations((DistributedFileSystem)fileSys, 
          srcPath.toUri().getPath(),
          0, length);
  // corrupt file1
  
  for (int idx: corruptBlockIdxs) {
    corruptBlock(file1Loc.get(idx).getBlock(), 
                              dfs);
  }
  RaidDFSUtil.reportCorruptBlocks((DistributedFileSystem)fileSys, srcPath,
                       corruptBlockIdxs, blockSize);
  
  // verify the partial read
  byte[] buffer = new byte[bufferSize];
  FSDataInputStream in = raidFs.open(srcPath);
  
  long numRead = 0;
  CRC32 newcrc = new CRC32();
  
  int num = 0;
  while (num >= 0) {
    num = in.read(numRead, buffer, 0, bufferSize);
    if (num < 0) {
      break;
    }
    numRead += num;
    newcrc.update(buffer, 0, num);
  }
  in.close();

  if (numRead != length) {
    LOG.info("Number of bytes read " + numRead +
             " does not match file size " + length);
    return false;
  }

  LOG.info(" Newcrc " + newcrc.getValue() + " old crc " + crc);
  if (newcrc.getValue() != crc) {
    LOG.info("CRC mismatch of file " + srcPath.toUri().getPath() + ": " + 
              newcrc.getValue() + " vs. " + crc);
    return false;
  }
  return true;
}
 
Author: rhli, Project: hadoop-EAR, Lines: 66, Source: TestReadConstruction.java

Example 6: implParityBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
/**
 * Corrupt a parity file and wait for it to get fixed.
 */
private void implParityBlockFix(String testName, boolean local)
  throws Exception {
  LOG.info("Test " + testName + " started.");
  int stripeLength = 3;
  mySetup(stripeLength); 
  long[] crcs = new long[3];
  int[] seeds = new int[3];
  Path dirPath = new Path("/user/dhruba/raidtest");
  Path[] files = TestRaidDfs.createTestFiles(dirPath,
      fileSizes, blockSizes, crcs, seeds, fileSys, (short)1);
  Path destPath = new Path("/destraid/user/dhruba");
  Path parityFile = new Path("/destraid/user/dhruba/raidtest");
  LOG.info("Test " + testName + " created test files");
  Configuration localConf = this.getRaidNodeConfig(conf, local);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForDirRaided(LOG, fileSys, dirPath, destPath);
    cnode.stop(); cnode.join();

    long parityCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    FileStatus parityStat = fileSys.getFileStatus(parityFile);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockIntegrityMonitor.getNumFilesFixed());

    // Corrupt parity blocks for different stripes.
    int[] corruptBlockIdxs = new int[]{0, 1, 2};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock().getBlockName(), dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(dfs, parityFile, corruptBlockIdxs,
        2*blockSize);

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted",
                 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], parityFile.toUri().getPath());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test " + testName + " waiting for files to be fixed.");
      Thread.sleep(3000);
    }
    assertEquals("file not fixed",
                 1, cnode.blockIntegrityMonitor.getNumFilesFixed());

    long checkCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    assertEquals("file not fixed",
                 parityCRC, checkCRC);

  } catch (Exception e) {
    LOG.info("Test " + testName + " Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test " + testName + " completed.");
}
 
Author: iVCE, Project: RDFS, Lines: 73, Source: TestDirectoryBlockFixer.java

Example 7: implBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
/**
 * Create a file with three stripes, corrupt a block each in two stripes,
 * and wait for the file to be fixed.
 */
private void implBlockFix(boolean local) throws Exception {
  LOG.info("Test testBlockFix started.");
  long blockSize = 8192L;
  int stripeLength = 3;
  mySetup(stripeLength, -1); // never har
  Path file1 = new Path("/user/dhruba/raidtest/file1");
  Path destPath = new Path("/destraid/user/dhruba/raidtest");
  long crc1 = TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1,
                                                        1, 7, blockSize);
  long file1Len = fileSys.getFileStatus(file1).getLen();
  LOG.info("Test testBlockFix created test files");

  // create an instance of the RaidNode
  Configuration localConf = new Configuration(conf);
  localConf.setInt("raid.blockfix.interval", 1000);
  if (local) {
    localConf.set("raid.blockfix.classname",
                  "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");
  } else {
    localConf.set("raid.blockfix.classname",
                  "org.apache.hadoop.raid.DistBlockIntegrityMonitor");
  }
  localConf.setLong("raid.blockfix.filespertask", 2L);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForFileRaided(LOG, fileSys, file1, destPath);
    cnode.stop(); cnode.join();
    
    FileStatus srcStat = fileSys.getFileStatus(file1);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, file1.toUri().getPath(), 0, srcStat.getLen());

    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockIntegrityMonitor.getNumFilesFixed());
    
    // Corrupt blocks in two different stripes. We can fix them.
    int[] corruptBlockIdxs = new int[]{0, 4, 6};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock().getBlockName(), dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(dfs, file1, corruptBlockIdxs, blockSize);
    
    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted", 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], file1.toUri().getPath());
    assertEquals("wrong number of corrupt blocks", 3,
      RaidDFSUtil.corruptBlocksInFile(dfs, file1.toUri().getPath(), 0,
        srcStat.getLen()).size());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test testBlockFix waiting for files to be fixed.");
      Thread.sleep(1000);
    }
    assertEquals("file not fixed", 1, cnode.blockIntegrityMonitor.getNumFilesFixed());
    
    dfs = getDFS(conf, dfs);
    assertTrue("file not fixed",
               TestRaidDfs.validateFile(dfs, file1, file1Len, crc1));

  } catch (Exception e) {
    LOG.info("Test testBlockFix Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test testBlockFix completed.");
}
 
Author: iVCE, Project: RDFS, Lines: 80, Source: TestBlockFixer.java

Example 8: testMultiplePriorities

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testMultiplePriorities() throws Exception {
  Path srcFile = new Path("/home/test/file1");
  int repl = 1;
  int numBlocks = 8;
  long blockSize = 16384;
  int stripeLength = 3;
  Path destPath = new Path("/destraidrs");
  mySetup(stripeLength, -1); // never har
  Codec codec = Codec.getCodec("rs");
  LOG.info("Starting testMultiplePriorities");
  try {
    // Create test file and raid it.
    TestRaidDfs.createTestFilePartialLastBlock(
      fileSys, srcFile, repl, numBlocks, blockSize);
    FileStatus stat = fileSys.getFileStatus(srcFile);
    RaidNode.doRaid(conf, stat,
      destPath, codec, new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
      false, repl, repl);

    // Corrupt first block of file.
    int blockIdxToCorrupt = 1;
    LOG.info("Corrupt block " + blockIdxToCorrupt + " of file " + srcFile);
    LocatedBlocks locations = getBlockLocations(srcFile, stat.getLen());
    corruptBlock(locations.get(blockIdxToCorrupt).getBlock().getBlockName(),
        dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(fileSys, srcFile, new int[]{1}, blockSize);

    // Create Block Fixer and fix.
    FakeDistBlockIntegrityMonitor distBlockFixer = new FakeDistBlockIntegrityMonitor(conf);
    assertEquals(0, distBlockFixer.submittedJobs.size());

    // One job should be submitted.
    distBlockFixer.getCorruptionMonitor().checkAndReconstructBlocks();
    assertEquals(1, distBlockFixer.submittedJobs.size());

    // No new job should be submitted since we already have one.
    distBlockFixer.getCorruptionMonitor().checkAndReconstructBlocks();
    assertEquals(1, distBlockFixer.submittedJobs.size());

    // Corrupt one more block.
    blockIdxToCorrupt = 4;
    LOG.info("Corrupt block " + blockIdxToCorrupt + " of file " + srcFile);
    locations = getBlockLocations(srcFile, stat.getLen());
    corruptBlock(locations.get(blockIdxToCorrupt).getBlock().getBlockName(),
        dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(fileSys, srcFile, new int[]{4}, blockSize);

    // A new job should be submitted since two blocks are corrupt.
    distBlockFixer.getCorruptionMonitor().checkAndReconstructBlocks();
    assertEquals(2, distBlockFixer.submittedJobs.size());
  } finally {
    myTearDown();
  }
}
 
Author: iVCE, Project: RDFS, Lines: 55, Source: TestBlockFixer.java

Example 9: doThePartialTest

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
private boolean doThePartialTest(Codec codec,
                              int blockNum,
                              int[] corruptBlockIdxs) throws Exception {
  long blockSize = 8192L;
  int bufferSize = 4192;

  Path srcPath = new Path("/user/dikang/raidtest/file" + 
                          UUID.randomUUID().toString());

  long crc = TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
      1, blockNum, blockSize);
  
  DistributedRaidFileSystem raidFs = getRaidFS();
  assertTrue(raidFs.exists(srcPath));
  
  // generate the parity files.
  doRaid(srcPath, codec);

  FileStatus file1Stat = fileSys.getFileStatus(srcPath);
  long length = file1Stat.getLen();
  LocatedBlocks file1Loc =
      RaidDFSUtil.getBlockLocations((DistributedFileSystem)fileSys, 
          srcPath.toUri().getPath(),
          0, length);
  // corrupt file1
  
  for (int idx: corruptBlockIdxs) {
    corruptBlock(file1Loc.get(idx).getBlock().getBlockName(), 
                              dfs);
  }
  RaidDFSUtil.reportCorruptBlocks((DistributedFileSystem)fileSys, srcPath,
                       corruptBlockIdxs, blockSize);
  
  // verify the partial read
  byte[] buffer = new byte[bufferSize];
  FSDataInputStream in = raidFs.open(srcPath);
  
  long numRead = 0;
  CRC32 newcrc = new CRC32();
  
  int num = 0;
  while (num >= 0) {
    num = in.read(numRead, buffer, 0, bufferSize);
    if (num < 0) {
      break;
    }
    numRead += num;
    newcrc.update(buffer, 0, num);
  }
  in.close();

  if (numRead != length) {
    LOG.info("Number of bytes read " + numRead +
             " does not match file size " + length);
    return false;
  }

  LOG.info(" Newcrc " + newcrc.getValue() + " old crc " + crc);
  if (newcrc.getValue() != crc) {
    LOG.info("CRC mismatch of file " + srcPath.toUri().getPath() + ": " + 
              newcrc + " vs. " + crc);
    return false;
  }
  return true;
}
 
Author: iVCE, Project: RDFS, Lines: 66, Source: TestReadConstruction.java


Note: The org.apache.hadoop.hdfs.RaidDFSUtil.reportCorruptBlocks examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use must follow each project's License. Do not reproduce without permission.