

Java RaidDFSUtil.getCRC Method Code Examples

This page collects typical usage examples of the Java method org.apache.hadoop.hdfs.RaidDFSUtil.getCRC. If you are wondering how RaidDFSUtil.getCRC is used in practice, or looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.RaidDFSUtil.


Four code examples of the RaidDFSUtil.getCRC method are shown below, sorted by popularity by default.
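Before the individual examples, here is a minimal sketch of the pattern they all share: snapshot a file's CRC with RaidDFSUtil.getCRC before corrupting blocks, then recompute it after repair and check that the two values match. This is a sketch only, assuming the HDFS RAID contrib classes (as shipped in the hadoop-EAR/RDFS forks) are on the classpath and an HDFS cluster is reachable; the class name, helper method, and file path below are illustrative, not part of the library.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.RaidDFSUtil;

public class GetCrcSketch {

  // Returns true if the file's current CRC equals the earlier snapshot.
  static boolean crcUnchanged(FileSystem fs, Path file, long expectedCrc)
      throws IOException {
    return RaidDFSUtil.getCRC(fs, file) == expectedCrc;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at a running, RAID-enabled HDFS cluster.
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/user/dhruba/raidtest/file1"); // illustrative path
    long before = RaidDFSUtil.getCRC(fs, file);
    // ... corrupt blocks here and let the block fixer repair the file ...
    System.out.println("file intact after fix: " + crcUnchanged(fs, file, before));
  }
}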

Example 1: implParityBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
/**
 * Corrupt a parity file and wait for it to get fixed.
 */
private void implParityBlockFix(String testName, boolean local)
  throws Exception {
  LOG.info("Test " + testName + " started.");
  int stripeLength = 3;
  mySetup(stripeLength); 
  long[] crcs = new long[3];
  int[] seeds = new int[3];
  Path dirPath = new Path("/user/dhruba/raidtest");
  Path[] files = TestRaidDfs.createTestFiles(dirPath,
      fileSizes, blockSizes, crcs, seeds, fileSys, (short)1);
  Path destPath = new Path("/destraid/user/dhruba");
  Path parityFile = new Path("/destraid/user/dhruba/raidtest");
  LOG.info("Test " + testName + " created test files");
  Configuration localConf = this.getRaidNodeConfig(conf, local);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForDirRaided(LOG, fileSys, dirPath, destPath);
    cnode.stop(); cnode.join();

    long parityCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    FileStatus parityStat = fileSys.getFileStatus(parityFile);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockIntegrityMonitor.getNumFilesFixed());

    // Corrupt parity blocks for different stripes.
    int[] corruptBlockIdxs = new int[]{0, 1, 2};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock(), dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(dfs, parityFile, corruptBlockIdxs,
        2*blockSize);

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted",
                 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], parityFile.toUri().getPath());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test " + testName + " waiting for files to be fixed.");
      Thread.sleep(3000);
    }
    TestBlockFixer.verifyMetrics(fileSys, cnode, local, 1L, 
        corruptBlockIdxs.length);

    long checkCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    assertEquals("file not fixed",
                 parityCRC, checkCRC);

  } catch (Exception e) {
    LOG.info("Test " + testName + " Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test " + testName + " completed.");
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 73, Source file: TestDirectoryBlockFixer.java

Example 2: validateSingleFile

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
private void validateSingleFile(String code, FileSystem fileSys, 
    Path sourceDir, int stripeLength, int blockNum, boolean lastPartial)
        throws Exception {
  LOG.info("Test file with " + blockNum + " blocks and " +
        (lastPartial? "partial": "full") + " last block");
  Codec codec = loadTestCodecs(code, stripeLength, true);
  Path parityDir = new Path(codec.parityDirectory);
  RaidDFSUtil.cleanUp(fileSys, sourceDir);
  RaidDFSUtil.cleanUp(fileSys, parityDir);
  fileSys.mkdirs(sourceDir);
  
  Path file1 = new Path(sourceDir, "file1");
  if (!lastPartial) {
    TestRaidDfs.createTestFile(fileSys, file1, 2, blockNum, 8192L);
  } else {
    TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1, 2,
        blockNum, 8192L);
  }
  Path parityFile = RaidNode.getOriginalParityFile(parityDir, sourceDir);
  // Do directory level raid
  LOG.info("Create a directory-raid parity file " + parityFile);
  assertTrue("Cannot raid directory " + sourceDir, 
      doRaid(conf, fileSys, sourceDir, codec));
  assertEquals("Modification time should be the same", 
      fileSys.getFileStatus(sourceDir).getModificationTime(),
      fileSys.getFileStatus(parityFile).getModificationTime());
  assertEquals("Replica num of source file should be reduced to 1",
      fileSys.getFileStatus(file1).getReplication(), 1);
  assertEquals("Replica num of parity file should be reduced to 1",
      fileSys.getFileStatus(parityFile).getReplication(), 1);
  long dirCRC = RaidDFSUtil.getCRC(fileSys, parityFile);
  long dirLen = fileSys.getFileStatus(parityFile).getLen();
  // remove the parity dir
  RaidDFSUtil.cleanUp(fileSys, parityDir);
  codec = loadTestCodecs(code, stripeLength, false);
  Path parityFile1 = RaidNode.getOriginalParityFile(parityDir,
      file1);
  LOG.info("Create a file-raid parity file " + parityFile1);
  assertTrue("Cannot raid file " + file1, 
      doRaid(conf, fileSys, file1, codec));
  assertTrue("Parity file doesn't match when the file has " + blockNum + 
      " blocks ", 
      TestRaidDfs.validateFile(fileSys, parityFile1, dirLen, dirCRC));
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 45, Source file: TestDirectoryRaidEncoder.java

Example 3: testParityHarBadBlockFixer

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testParityHarBadBlockFixer() throws Exception {
  LOG.info("Test testParityHarBlockFix started.");
  long blockSize = 8192L;
  int stripeLength = 3;
  mySetup(stripeLength, -1, "org.apache.hadoop.raid.BadXORCode",
      "org.apache.hadoop.raid.BadReedSolomonCode", "rs", true); 
  Path file1 = new Path("/user/dhruba/raidtest/file1");
  // Parity file will have 7 blocks.
  long crc = TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1,
                                             1, 20, blockSize);
  LOG.info("Created test files");
  
  // create an instance of the RaidNode
  Configuration localConf = new Configuration(conf);
  localConf.setInt(RaidNode.RAID_PARITY_HAR_THRESHOLD_DAYS_KEY, 0);
  localConf.set("raid.blockfix.classname",
                "org.apache.hadoop.raid.DistBlockIntegrityMonitor");
  localConf.setLong("raid.blockfix.filespertask", 2L);
  
  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    Path harDirectory =
      new Path("/destraidrs/user/dhruba/raidtest/raidtest" +
               RaidNode.HAR_SUFFIX);
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < 1000 * 120) {
      if (fileSys.exists(harDirectory)) {
        break;
      }
      LOG.info("Waiting for har");
      Thread.sleep(1000);
    }
    assertEquals(true, fileSys.exists(harDirectory));
    cnode.stop(); cnode.join();
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    // Corrupt source blocks
    FileStatus stat = fileSys.getFileStatus(file1);
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, file1.toUri().getPath(), 0, stat.getLen());
    int[] corruptBlockIdxs = new int[]{0};
    for (int idx: corruptBlockIdxs) {
      TestBlockFixer.corruptBlock(locs.get(idx).getBlock(),
          dfsCluster);
    }
    RaidDFSUtil.reportCorruptBlocks(dfs, file1, corruptBlockIdxs,
        stat.getBlockSize());

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted", 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], file1.toUri().getPath());

    cnode = RaidNode.createRaidNode(null, localConf);
    start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Waiting for files to be fixed.");
      Thread.sleep(1000);
    }

    long checkCRC = RaidDFSUtil.getCRC(fileSys, file1);
    assertEquals("file not fixed", crc, checkCRC);
    // Verify the counters are right
    long expectedNumFailures = corruptBlockIdxs.length;
    assertEquals(expectedNumFailures,
        cnode.blockIntegrityMonitor.getNumBlockFixSimulationFailures());
    assertEquals(0,
        cnode.blockIntegrityMonitor.getNumBlockFixSimulationSuccess());
  } catch (Exception e) {
    LOG.info("Exception ", e);
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test testParityHarBlockFix completed.");
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 79, Source file: TestSimulationParityBlockFixer.java

Example 4: implParityBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
/**
 * Corrupt a parity file and wait for it to get fixed.
 */
private void implParityBlockFix(String testName, boolean local)
  throws Exception {
  LOG.info("Test " + testName + " started.");
  int stripeLength = 3;
  mySetup(stripeLength); 
  long[] crcs = new long[3];
  int[] seeds = new int[3];
  Path dirPath = new Path("/user/dhruba/raidtest");
  Path[] files = TestRaidDfs.createTestFiles(dirPath,
      fileSizes, blockSizes, crcs, seeds, fileSys, (short)1);
  Path destPath = new Path("/destraid/user/dhruba");
  Path parityFile = new Path("/destraid/user/dhruba/raidtest");
  LOG.info("Test " + testName + " created test files");
  Configuration localConf = this.getRaidNodeConfig(conf, local);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForDirRaided(LOG, fileSys, dirPath, destPath);
    cnode.stop(); cnode.join();

    long parityCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    FileStatus parityStat = fileSys.getFileStatus(parityFile);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockIntegrityMonitor.getNumFilesFixed());

    // Corrupt parity blocks for different stripes.
    int[] corruptBlockIdxs = new int[]{0, 1, 2};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock().getBlockName(), dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(dfs, parityFile, corruptBlockIdxs,
        2*blockSize);

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted",
                 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], parityFile.toUri().getPath());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test " + testName + " waiting for files to be fixed.");
      Thread.sleep(3000);
    }
    assertEquals("file not fixed",
                 1, cnode.blockIntegrityMonitor.getNumFilesFixed());

    long checkCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    assertEquals("file not fixed",
                 parityCRC, checkCRC);

  } catch (Exception e) {
    LOG.info("Test " + testName + " Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test " + testName + " completed.");
}
 
Developer ID: iVCE, Project: RDFS, Lines: 73, Source file: TestDirectoryBlockFixer.java


Note: The org.apache.hadoop.hdfs.RaidDFSUtil.getCRC method examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution should follow the corresponding projects' licenses. Do not reproduce without permission.