

Java RaidDFSUtil.getBlockLocations Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.RaidDFSUtil.getBlockLocations. If you are wondering what RaidDFSUtil.getBlockLocations does or how to use it, the curated code examples below may help. You can also explore other usage examples of org.apache.hadoop.hdfs.RaidDFSUtil, the class this method belongs to.


The following presents 11 code examples of RaidDFSUtil.getBlockLocations, sorted by popularity by default.
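
Before diving into the collected examples, here is a minimal sketch of the call pattern they all share: obtain a DistributedFileSystem handle, look up the file length via FileStatus, then request the LocatedBlocks covering the whole file. The class and method names below (GetBlockLocationsSketch, printBlockReport) and the fs/path handles are placeholders invented for illustration; every real example that follows obtains them from a test cluster.

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Minimal usage sketch (hypothetical class; not taken from any project below).
public class GetBlockLocationsSketch {
  static void printBlockReport(DistributedFileSystem fs, Path path)
      throws IOException {
    FileStatus stat = fs.getFileStatus(path);
    // Request the located blocks spanning the whole file:
    // offset 0, length = file length.
    LocatedBlocks blocks = RaidDFSUtil.getBlockLocations(
        fs, path.toUri().getPath(), 0, stat.getLen());
    for (LocatedBlock b : blocks.getLocatedBlocks()) {
      System.out.println(b.getBlock()
          + " corrupt=" + b.isCorrupt()
          + " replicas=" + b.getLocations().length);
    }
  }
}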

Example 1: testVerifySourceFile

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testVerifySourceFile() throws Exception {
  mySetup();
  
  try {
    Path srcPath = new Path("/user/dikang/raidtest/file0");
    int numBlocks = 8;
    TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
        1, numBlocks, 8192L);
    assertTrue(fileSys.exists(srcPath));
    Codec codec = Codec.getCodec("rs");
    FileStatus stat = fileSys.getFileStatus(srcPath);
    
    // verify good file
    assertTrue(FastFileCheck.checkFile(conf, (DistributedFileSystem)fileSys, 
        fileSys, srcPath, null, codec, 
        RaidUtils.NULL_PROGRESSABLE, true));
    
    // verify bad file
    LocatedBlocks fileLoc =
        RaidDFSUtil.getBlockLocations((DistributedFileSystem)fileSys, 
            srcPath.toUri().getPath(),
            0, stat.getLen());
    // corrupt a random block of the source file
    Random rand = new Random();
    int idx = rand.nextInt(numBlocks);
    TestRaidDfs.corruptBlock(srcPath, 
        fileLoc.getLocatedBlocks().get(idx).getBlock(), 
        NUM_DATANODES, true, dfs);
    
    assertFalse(FastFileCheck.checkFile(conf, (DistributedFileSystem)fileSys, 
        fileSys, srcPath, null, codec, 
        RaidUtils.NULL_PROGRESSABLE, true));
  } finally {
    stopCluster();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 37, Source file: TestFastFileCheck.java

Example 2: corruptBlocksInFile

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
/**
 * Returns the corrupt blocks in a file.
 */
List<LocatedBlock> corruptBlocksInFile(DistributedFileSystem fs,
                                       String uriPath, FileStatus stat)
  throws IOException {
  List<LocatedBlock> corrupt = new LinkedList<LocatedBlock>();
  LocatedBlocks locatedBlocks =
    RaidDFSUtil.getBlockLocations(fs, uriPath, 0, stat.getLen());
  for (LocatedBlock b: locatedBlocks.getLocatedBlocks()) {
    if (b.isCorrupt() ||
        (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
      corrupt.add(b);
    }
  }
  return corrupt;
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines of code: 18, Source file: BlockFixer.java
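
As a quick orientation, a hypothetical caller of the helper above might look like the following fragment; fs, uriPath, and the surrounding context are placeholders, not part of the BlockFixer source:

// Hypothetical invocation sketch (placeholder names, not from BlockFixer):
FileStatus stat = fs.getFileStatus(new Path(uriPath));
List<LocatedBlock> bad = corruptBlocksInFile(fs, uriPath, stat);
System.out.println(uriPath + " has " + bad.size() + " corrupt block(s)");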

Example 3: implParityBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
/**
 * Corrupt a parity file and wait for it to get fixed.
 */
private void implParityBlockFix(String testName, boolean local)
  throws Exception {
  LOG.info("Test " + testName + " started.");
  int stripeLength = 3;
  mySetup(stripeLength); 
  long[] crcs = new long[3];
  int[] seeds = new int[3];
  Path dirPath = new Path("/user/dhruba/raidtest");
  Path[] files = TestRaidDfs.createTestFiles(dirPath,
      fileSizes, blockSizes, crcs, seeds, fileSys, (short)1);
  Path destPath = new Path("/destraid/user/dhruba");
  Path parityFile = new Path("/destraid/user/dhruba/raidtest");
  LOG.info("Test " + testName + " created test files");
  Configuration localConf = this.getRaidNodeConfig(conf, local);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForDirRaided(LOG, fileSys, dirPath, destPath);
    cnode.stop(); cnode.join();

    long parityCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    FileStatus parityStat = fileSys.getFileStatus(parityFile);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockIntegrityMonitor.getNumFilesFixed());

    // Corrupt parity blocks for different stripes.
    int[] corruptBlockIdxs = new int[]{0, 1, 2};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock(), dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(dfs, parityFile, corruptBlockIdxs,
        2*blockSize);

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted",
                 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], parityFile.toUri().getPath());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test " + testName + " waiting for files to be fixed.");
      Thread.sleep(3000);
    }
    TestBlockFixer.verifyMetrics(fileSys, cnode, local, 1L, 
        corruptBlockIdxs.length);

    long checkCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    assertEquals("file not fixed",
                 parityCRC, checkCRC);

  } catch (Exception e) {
    LOG.info("Test " + testName + " Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test " + testName + " completed.");
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 73, Source file: TestDirectoryBlockFixer.java

Example 4: verifyDecoder

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void verifyDecoder(String code, int parallelism) throws Exception {
  Codec codec = Codec.getCodec(code);
  conf.setInt("raid.encoder.parallelism", parallelism);
  ConfigBuilder cb = new ConfigBuilder(CONFIG_FILE);
  cb.addPolicy("RaidTest1", "/user/dikang/raidtest/file" + code + parallelism,
      1, 1, code);
  cb.persist();
  Path srcPath = new Path("/user/dikang/raidtest/file" + code + parallelism +
      "/file1");
  long blockSize = 8192 * 1024L;
  
  long crc = TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
      1, 7, blockSize);
  doRaid(srcPath, codec);
  FileStatus srcStat = fileSys.getFileStatus(srcPath);
  ParityFilePair pair = ParityFilePair.getParityFile(codec, srcStat, conf);
  
  FileStatus file1Stat = fileSys.getFileStatus(srcPath);
  long length = file1Stat.getLen();
  LocatedBlocks file1Loc =
      RaidDFSUtil.getBlockLocations((DistributedFileSystem)fileSys, 
          srcPath.toUri().getPath(),
          0, length);
   
  // corrupt file
    
  int[] corruptBlockIdxs = new int[] {5};
  long errorOffset = 5 * blockSize;
  for (int idx: corruptBlockIdxs) {
    TestBlockFixer.corruptBlock(file1Loc.get(idx).getBlock(), dfsCluster);
  }
    
  RaidDFSUtil.reportCorruptBlocks((DistributedFileSystem)fileSys, srcPath,
      corruptBlockIdxs, blockSize);
  
  Decoder decoder = new Decoder(conf, codec);
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  decoder.codec.simulateBlockFix = true;
  CRC32 oldCRC = decoder.fixErasedBlock(fileSys, srcStat, fileSys, 
      pair.getPath(), true, blockSize, errorOffset, blockSize, false, 
      out, null, null, false);
  
  decoder.codec.simulateBlockFix = false;
  out = new ByteArrayOutputStream();
  decoder.fixErasedBlock(fileSys, srcStat, fileSys, 
      pair.getPath(), true, blockSize, errorOffset, blockSize, false, 
      out, null, null, false);
  
  // calculate the new crc
  CRC32 newCRC = new CRC32();
  byte[] constructedBytes = out.toByteArray();
  newCRC.update(constructedBytes);
  
  assertEquals(oldCRC.getValue(), newCRC.getValue());
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 56, Source file: TestDecoder.java

Example 5: testParityHarBadBlockFixer

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testParityHarBadBlockFixer() throws Exception {
  LOG.info("Test testParityHarBlockFix started.");
  long blockSize = 8192L;
  int stripeLength = 3;
  mySetup(stripeLength, -1, "org.apache.hadoop.raid.BadXORCode",
      "org.apache.hadoop.raid.BadReedSolomonCode", "rs", true); 
  Path file1 = new Path("/user/dhruba/raidtest/file1");
  // Parity file will have 7 blocks.
  long crc = TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1,
                                             1, 20, blockSize);
  LOG.info("Created test files");
  
  // create an instance of the RaidNode
  Configuration localConf = new Configuration(conf);
  localConf.setInt(RaidNode.RAID_PARITY_HAR_THRESHOLD_DAYS_KEY, 0);
  localConf.set("raid.blockfix.classname",
                "org.apache.hadoop.raid.DistBlockIntegrityMonitor");
  localConf.setLong("raid.blockfix.filespertask", 2L);
  
  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    Path harDirectory =
      new Path("/destraidrs/user/dhruba/raidtest/raidtest" +
               RaidNode.HAR_SUFFIX);
    long start = System.currentTimeMillis();
    while (System.currentTimeMillis() - start < 1000 * 120) {
      if (fileSys.exists(harDirectory)) {
        break;
      }
      LOG.info("Waiting for har");
      Thread.sleep(1000);
    }
    assertEquals(true, fileSys.exists(harDirectory));
    cnode.stop(); cnode.join();
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    // Corrupt source blocks
    FileStatus stat = fileSys.getFileStatus(file1);
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, file1.toUri().getPath(), 0, stat.getLen());
    int[] corruptBlockIdxs = new int[]{0};
    for (int idx: corruptBlockIdxs) {
      TestBlockFixer.corruptBlock(locs.get(idx).getBlock(),
          dfsCluster);
    }
    RaidDFSUtil.reportCorruptBlocks(dfs, file1, corruptBlockIdxs,
        stat.getBlockSize());

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted", 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], file1.toUri().getPath());

    cnode = RaidNode.createRaidNode(null, localConf);
    start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Waiting for files to be fixed.");
      Thread.sleep(1000);
    }

    long checkCRC = RaidDFSUtil.getCRC(fileSys, file1);
    assertEquals("file not fixed", crc, checkCRC);
    // Verify the counters are right
    long expectedNumFailures = corruptBlockIdxs.length;
    assertEquals(expectedNumFailures,
        cnode.blockIntegrityMonitor.getNumBlockFixSimulationFailures());
    assertEquals(0,
        cnode.blockIntegrityMonitor.getNumBlockFixSimulationSuccess());
  } catch (Exception e) {
    LOG.info("Exception ", e);
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test testParityHarBlockFix completed.");
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 79, Source file: TestSimulationParityBlockFixer.java

Example 6: doThePartialTest

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
private boolean doThePartialTest(Codec codec,
                              int blockNum,
                              int[] corruptBlockIdxs) throws Exception {
  long blockSize = 8192 * 1024L;
  int bufferSize = 4192 * 1024;

  Path srcPath = new Path("/user/dikang/raidtest/file" + 
                          UUID.randomUUID().toString());

  long crc = TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
      1, blockNum, blockSize);
  
  DistributedRaidFileSystem raidFs = getRaidFS();
  assertTrue(raidFs.exists(srcPath));
  
  // generate the parity files.
  doRaid(srcPath, codec);

  FileStatus file1Stat = fileSys.getFileStatus(srcPath);
  long length = file1Stat.getLen();
  LocatedBlocks file1Loc =
      RaidDFSUtil.getBlockLocations((DistributedFileSystem)fileSys, 
          srcPath.toUri().getPath(),
          0, length);
  // corrupt file1
  
  for (int idx: corruptBlockIdxs) {
    corruptBlock(file1Loc.get(idx).getBlock(), 
                              dfs);
  }
  RaidDFSUtil.reportCorruptBlocks((DistributedFileSystem)fileSys, srcPath,
                       corruptBlockIdxs, blockSize);
  
  // verify the partial read
  byte[] buffer = new byte[bufferSize];
  FSDataInputStream in = raidFs.open(srcPath);
  
  long numRead = 0;
  CRC32 newcrc = new CRC32();
  
  int num = 0;
  while (num >= 0) {
    num = in.read(numRead, buffer, 0, bufferSize);
    if (num < 0) {
      break;
    }
    numRead += num;
    newcrc.update(buffer, 0, num);
  }
  in.close();

  if (numRead != length) {
    LOG.info("Number of bytes read " + numRead +
             " does not match file size " + length);
    return false;
  }

  LOG.info(" Newcrc " + newcrc.getValue() + " old crc " + crc);
  if (newcrc.getValue() != crc) {
    LOG.info("CRC mismatch of file " + srcPath.toUri().getPath() + ": " + 
              newcrc.getValue() + " vs. " + crc);
    return false;
  }
  return true;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 66, Source file: TestReadConstruction.java

Example 7: implParityBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
/**
 * Corrupt a parity file and wait for it to get fixed.
 */
private void implParityBlockFix(String testName, boolean local)
  throws Exception {
  LOG.info("Test " + testName + " started.");
  int stripeLength = 3;
  mySetup(stripeLength); 
  long[] crcs = new long[3];
  int[] seeds = new int[3];
  Path dirPath = new Path("/user/dhruba/raidtest");
  Path[] files = TestRaidDfs.createTestFiles(dirPath,
      fileSizes, blockSizes, crcs, seeds, fileSys, (short)1);
  Path destPath = new Path("/destraid/user/dhruba");
  Path parityFile = new Path("/destraid/user/dhruba/raidtest");
  LOG.info("Test " + testName + " created test files");
  Configuration localConf = this.getRaidNodeConfig(conf, local);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForDirRaided(LOG, fileSys, dirPath, destPath);
    cnode.stop(); cnode.join();

    long parityCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    FileStatus parityStat = fileSys.getFileStatus(parityFile);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockIntegrityMonitor.getNumFilesFixed());

    // Corrupt parity blocks for different stripes.
    int[] corruptBlockIdxs = new int[]{0, 1, 2};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock().getBlockName(), dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(dfs, parityFile, corruptBlockIdxs,
        2*blockSize);

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted",
                 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], parityFile.toUri().getPath());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test " + testName + " waiting for files to be fixed.");
      Thread.sleep(3000);
    }
    assertEquals("file not fixed",
                 1, cnode.blockIntegrityMonitor.getNumFilesFixed());

    long checkCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    assertEquals("file not fixed",
                 parityCRC, checkCRC);

  } catch (Exception e) {
    LOG.info("Test " + testName + " Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test " + testName + " completed.");
}
 
Developer ID: iVCE, Project: RDFS, Lines of code: 73, Source file: TestDirectoryBlockFixer.java

Example 8: implBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
/**
 * Create a file with three stripes, corrupt a block each in two stripes,
 * and wait for the file to be fixed.
 */
private void implBlockFix(boolean local) throws Exception {
  LOG.info("Test testBlockFix started.");
  long blockSize = 8192L;
  int stripeLength = 3;
  mySetup(stripeLength, -1); // never har
  Path file1 = new Path("/user/dhruba/raidtest/file1");
  Path destPath = new Path("/destraid/user/dhruba/raidtest");
  long crc1 = TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1,
                                                        1, 7, blockSize);
  long file1Len = fileSys.getFileStatus(file1).getLen();
  LOG.info("Test testBlockFix created test files");

  // create an instance of the RaidNode
  Configuration localConf = new Configuration(conf);
  localConf.setInt("raid.blockfix.interval", 1000);
  if (local) {
    localConf.set("raid.blockfix.classname",
                  "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");
  } else {
    localConf.set("raid.blockfix.classname",
                  "org.apache.hadoop.raid.DistBlockIntegrityMonitor");
  }
  localConf.setLong("raid.blockfix.filespertask", 2L);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForFileRaided(LOG, fileSys, file1, destPath);
    cnode.stop(); cnode.join();
    
    FileStatus srcStat = fileSys.getFileStatus(file1);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, file1.toUri().getPath(), 0, srcStat.getLen());

    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockIntegrityMonitor.getNumFilesFixed());
    
    // Corrupt blocks in two different stripes. We can fix them.
    int[] corruptBlockIdxs = new int[]{0, 4, 6};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock().getBlockName(), dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(dfs, file1, corruptBlockIdxs, blockSize);
    
    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted", 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], file1.toUri().getPath());
    assertEquals("wrong number of corrupt blocks", 3,
      RaidDFSUtil.corruptBlocksInFile(dfs, file1.toUri().getPath(), 0,
        srcStat.getLen()).size());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test testBlockFix waiting for files to be fixed.");
      Thread.sleep(1000);
    }
    assertEquals("file not fixed", 1, cnode.blockIntegrityMonitor.getNumFilesFixed());
    
    dfs = getDFS(conf, dfs);
    assertTrue("file not fixed",
               TestRaidDfs.validateFile(dfs, file1, file1Len, crc1));

  } catch (Exception e) {
    LOG.info("Test testBlockFix Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test testBlockFix completed.");
}
 
Developer ID: iVCE, Project: RDFS, Lines of code: 80, Source file: TestBlockFixer.java

Example 9: doThePartialTest

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
private boolean doThePartialTest(Codec codec,
                              int blockNum,
                              int[] corruptBlockIdxs) throws Exception {
  long blockSize = 8192L;
  int bufferSize = 4192;

  Path srcPath = new Path("/user/dikang/raidtest/file" + 
                          UUID.randomUUID().toString());

  long crc = TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
      1, blockNum, blockSize);
  
  DistributedRaidFileSystem raidFs = getRaidFS();
  assertTrue(raidFs.exists(srcPath));
  
  // generate the parity files.
  doRaid(srcPath, codec);

  FileStatus file1Stat = fileSys.getFileStatus(srcPath);
  long length = file1Stat.getLen();
  LocatedBlocks file1Loc =
      RaidDFSUtil.getBlockLocations((DistributedFileSystem)fileSys, 
          srcPath.toUri().getPath(),
          0, length);
  // corrupt file1
  
  for (int idx: corruptBlockIdxs) {
    corruptBlock(file1Loc.get(idx).getBlock().getBlockName(), 
                              dfs);
  }
  RaidDFSUtil.reportCorruptBlocks((DistributedFileSystem)fileSys, srcPath,
                       corruptBlockIdxs, blockSize);
  
  // verify the partial read
  byte[] buffer = new byte[bufferSize];
  FSDataInputStream in = raidFs.open(srcPath);
  
  long numRead = 0;
  CRC32 newcrc = new CRC32();
  
  int num = 0;
  while (num >= 0) {
    num = in.read(numRead, buffer, 0, bufferSize);
    if (num < 0) {
      break;
    }
    numRead += num;
    newcrc.update(buffer, 0, num);
  }
  in.close();

  if (numRead != length) {
    LOG.info("Number of bytes read " + numRead +
             " does not match file size " + length);
    return false;
  }

  LOG.info(" Newcrc " + newcrc.getValue() + " old crc " + crc);
  if (newcrc.getValue() != crc) {
    LOG.info("CRC mismatch of file " + srcPath.toUri().getPath() + ": " + 
              newcrc + " vs. " + crc);
    return false;
  }
  return true;
}
 
Developer ID: iVCE, Project: RDFS, Lines of code: 66, Source file: TestReadConstruction.java

Example 10: implBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
/**
 * Create a file with three stripes, corrupt a block each in two stripes,
 * and wait for the file to be fixed.
 */
protected void implBlockFix(boolean local) throws Exception {
  LOG.info("Test testBlockFix started.");
  long blockSize = 8192L;
  int stripeLength = 3;
  mySetup(stripeLength, -1); // never har
  Path file1 = new Path("/user/dhruba/raidtest/file1");
  Path destPath = new Path("/destraid/user/dhruba/raidtest");
  long crc1 = TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1,
                                                        1, 7, blockSize);
  long file1Len = fileSys.getFileStatus(file1).getLen();
  LOG.info("Test testBlockFix created test files");

  // create an instance of the RaidNode
  Configuration localConf = new Configuration(conf);
  localConf.set(RaidNode.RAID_LOCATION_KEY, "/destraid");
  localConf.setInt("raid.blockfix.interval", 1000);
  if (local) {
    localConf.set("raid.blockfix.classname",
                  "org.apache.hadoop.raid.LocalBlockFixer");
  } else {
    localConf.set("raid.blockfix.classname",
                  "org.apache.hadoop.raid.DistBlockFixer");
  }
  localConf.setLong("raid.blockfix.filespertask", 2L);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForFileRaided(LOG, fileSys, file1, destPath);
    cnode.stop(); cnode.join();
    
    FileStatus srcStat = fileSys.getFileStatus(file1);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, file1.toUri().getPath(), 0, srcStat.getLen());

    String[] corruptFiles = RaidDFSUtil.getCorruptFiles(dfs);
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockFixer.filesFixed());
    
    // Corrupt blocks in two different stripes. We can fix them.
    int[] corruptBlockIdxs = new int[]{0, 4, 6};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock());
    reportCorruptBlocks(dfs, file1, corruptBlockIdxs, blockSize);
    
    corruptFiles = RaidDFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted", 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], file1.toUri().getPath());
    assertEquals("wrong number of corrupt blocks", 3,
      RaidDFSUtil.corruptBlocksInFile(dfs, file1.toUri().getPath(), 0,
        srcStat.getLen()).size());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockFixer.filesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test testBlockFix waiting for files to be fixed.");
      Thread.sleep(1000);
    }
    assertEquals("file not fixed", 1, cnode.blockFixer.filesFixed());
    
    dfs = getDFS(conf, dfs);
    assertTrue("file not fixed",
               TestRaidDfs.validateFile(dfs, file1, file1Len, crc1));

  } catch (Exception e) {
    LOG.info("Test testBlockFix Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test testBlockFix completed.");
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines of code: 81, Source file: TestBlockFixer.java

Example 11: testDecoder

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the package/class this method depends on
public void testDecoder() throws Exception {
  mySetup();
  int stripeSize = 10;
  int paritySize = 4;
  long blockSize = 8192;
  Path file1 = new Path("/user/raidtest/file1");
  Path recoveredFile1 = new Path("/user/raidtest/file1.recovered");
  Path parityFile1 = new Path("/rsraid/user/raidtest/file1");
  long crc1 = TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1,
                                                        1, 25, blockSize);
  FileStatus file1Stat = fileSys.getFileStatus(file1);

  conf.setInt("raid.rsdecoder.bufsize", 512);
  conf.setInt("raid.rsencoder.bufsize", 512);

  try {
    // First encode the file.
    ReedSolomonEncoder encoder = new ReedSolomonEncoder(
      conf, stripeSize, paritySize);
    short parityRepl = 1;
    encoder.encodeFile(fileSys, file1, fileSys, parityFile1, parityRepl,
      Reporter.NULL);

    // Ensure there are no corrupt files yet.
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    String[] corruptFiles = RaidDFSUtil.getCorruptFiles(dfs);
    assertEquals(corruptFiles.length, 0);

    // Now corrupt the file.
    long corruptOffset = blockSize * 5;
    FileStatus srcStat = fileSys.getFileStatus(file1);
    LocatedBlocks locations = RaidDFSUtil.getBlockLocations(dfs,
        file1.toUri().getPath(), 0, srcStat.getLen());
    corruptBlock(locations.get(5).getBlock());
    corruptBlock(locations.get(6).getBlock());
    TestBlockFixer.reportCorruptBlocks(dfs, file1, new int[]{5, 6},
        srcStat.getBlockSize());

    // Ensure file is corrupted.
    corruptFiles = RaidDFSUtil.getCorruptFiles(dfs);
    assertEquals(corruptFiles.length, 1);
    assertEquals(corruptFiles[0], file1.toString());

    // Fix the file.
    ReedSolomonDecoder decoder = new ReedSolomonDecoder(
      conf, stripeSize, paritySize);
    decoder.decodeFile(fileSys, file1, fileSys, parityFile1,
              corruptOffset, recoveredFile1);
    assertTrue(TestRaidDfs.validateFile(
                  fileSys, recoveredFile1, file1Stat.getLen(), crc1));
  } finally {
    myTearDown();
  }
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines of code: 55, Source file: TestReedSolomonDecoder.java


Note: the examples of the org.apache.hadoop.hdfs.RaidDFSUtil.getBlockLocations method in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects; copyright remains with the original authors. For distribution and use, follow the license of the corresponding project; do not republish without permission.