Java RaidDFSUtil Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.RaidDFSUtil. If you are wondering what the RaidDFSUtil class does, how to call it, or what real-world usage looks like, the curated class code examples below should help.


The RaidDFSUtil class belongs to the org.apache.hadoop.hdfs package. A total of 15 RaidDFSUtil code examples are shown below, sorted by popularity by default.
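Before diving into the examples, here is a minimal sketch of the pattern that recurs most often in them: asking the namenode for the list of corrupt files, then listing the corrupt blocks inside each one. It is a sketch under assumptions, not a definitive recipe: it assumes a Hadoop build that actually ships RaidDFSUtil (the class comes from RAID-enabled forks such as hadoop-EAR and mapreduce-fork shown below, not stock Hadoop), it assumes fs.defaultFS points at an HDFS cluster, and the class name CorruptFileScan is made up for illustration. The signatures match how the examples below use the calls.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.RaidDFSUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class CorruptFileScan {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at a RAID-enabled HDFS cluster.
    DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf);

    // Ask the namenode for the paths of all files with corrupt blocks
    // (the same call used in Examples 3, 5 and 10 below).
    String[] corruptFiles = RaidDFSUtil.getCorruptFiles(dfs);
    for (String file : corruptFiles) {
      FileStatus stat = dfs.getFileStatus(new Path(file));
      // List the corrupt blocks inside each file
      // (the same call used in Example 4 below).
      List<LocatedBlock> corrupt =
          RaidDFSUtil.corruptBlocksInFile(dfs, file, 0, stat.getLen());
      System.out.println(file + ": " + corrupt.size() + " corrupt block(s)");
    }
  }
}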

Example 1: testSmallFileDirectory

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
public void testSmallFileDirectory() throws Exception {
  mySetup();
  int stripeLength = 4;
  long blockSize = 8192L;
  try {
    for (String code: RaidDFSUtil.codes) {
      LOG.info("testSmallFileDirectory: Test code " + code);
      Path sourceDir = new Path("/user/raid");
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[]{1000L, 4000L, 1000L}, blockSize, 4096L);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[]{2000L, 3000L, 2000L, 3000L}, blockSize, 3072L);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[]{3000L, 3000L, 3000L, 3000L}, blockSize, 3072L);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[]{511L, 3584L, 3000L, 1234L, 512L, 1234L, 3000L,
          3234L, 511L}, blockSize, 3584L);
    }
  } finally {
    myTearDown();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 23, Source: TestDirectoryRaidEncoder.java

Example 2: testDifferentBlockSizeFileDirectory

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
public void testDifferentBlockSizeFileDirectory() throws Exception {
  mySetup();
  int stripeLength = 3;
  long blockSize = 8192L;
  try {
    for (String code: RaidDFSUtil.codes) {
      LOG.info("testDifferentBlockSizeFileDirectory: Test code " + code);
      Path sourceDir = new Path("/user/raid");
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {1000, blockSize, 2*blockSize, 2*blockSize + 1},
          new long[] {blockSize, blockSize, 2*blockSize, blockSize},
          2*blockSize);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {blockSize, 2*blockSize, 3*blockSize, 4*blockSize},
          new long[] {blockSize, 2*blockSize, 3*blockSize, blockSize},
          3*blockSize);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {blockSize+1, 9*blockSize+1, 2*blockSize+1,
          blockSize+1}, new long[]{blockSize, 2*blockSize, 3*blockSize,
          blockSize}, 2*blockSize+512);
    }
  } finally {
    myTearDown();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 26, Source: TestDirectoryRaidEncoder.java

Example 3: getCorruptFiles

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
/**
 * Gets a list of corrupt files from the namenode
 * and filters out files that are currently being fixed
 * or were recently fixed.
 */
private List<Path> getCorruptFiles() throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem) 
    (new Path("/")).getFileSystem(getConf());

  String[] files = RaidDFSUtil.getCorruptFiles(dfs);
  List<Path> corruptFiles = new LinkedList<Path>();

  for (String f: files) {
    Path p = new Path(f);
    // filter out files that are being fixed or that were recently fixed
    if (!fileIndex.containsKey(p.toString())) {
      corruptFiles.add(p);
    }
  }
  RaidUtils.filterTrash(getConf(), corruptFiles);

  return corruptFiles;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 24, Source: DistBlockFixer.java

Example 4: waitForCorruptBlocks

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
private void waitForCorruptBlocks(
  int numCorruptBlocks, DistributedFileSystem dfs, Path file)
  throws Exception {
  String path = file.toUri().getPath();
  FileStatus stat = dfs.getFileStatus(file);
  long start = System.currentTimeMillis();
  long actual = 0;
  do {
    actual = RaidDFSUtil.corruptBlocksInFile(
        dfs, path, 0, stat.getLen()).size();
    if (actual == numCorruptBlocks) break;
    if (System.currentTimeMillis() - start > 120000) break;
    LOG.info("Waiting for " + numCorruptBlocks + " corrupt blocks in " +
      path + ", found " + actual);
    Thread.sleep(1000);
  } while (true);
  assertEquals(numCorruptBlocks, actual);
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 19, Source: TestRaidShell.java

Example 5: waitUntilCorruptFileCount

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
/**
 * Sleeps for up to 20s until the number of corrupt files
 * in the file system equals the number specified.
 */
private void waitUntilCorruptFileCount(DistributedFileSystem dfs,
                                       int corruptFiles)
  throws IOException {
  long waitStart = System.currentTimeMillis();
  while (RaidDFSUtil.getCorruptFiles(dfs).length != corruptFiles) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ignore) {
      
    }

    if (System.currentTimeMillis() > waitStart + 20000L) {
      break;
    }
  }
  
  int corruptFilesFound = RaidDFSUtil.getCorruptFiles(dfs).length;
  if (corruptFilesFound != corruptFiles) {
    throw new IOException("expected " + corruptFiles + 
                          " corrupt files but got " +
                          corruptFilesFound);
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 28, Source: TestRaidShellFsck.java

Example 6: testCorruptBlocks

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
/**
 * Corrupt a block in a raided file and make sure it shows up
 * in the RAID missing-blocks queue.
 */
@Test
public void testCorruptBlocks() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();
  try {
    cluster = new MiniDFSCluster(conf, 3, true, null);
    
    DistributedFileSystem dfs = DFSUtil.convertToDFS(cluster.getFileSystem());
    String filePath = "/test/file1";
    RaidDFSUtil.constructFakeRaidFile(dfs, filePath, RaidCodec.getCodec("rs"));
    
    FileStatus stat = dfs.getFileStatus(new Path(filePath));
    LocatedBlocks blocks = dfs.getClient().
        getLocatedBlocks(filePath, 0, stat.getLen());
    
    Block block = blocks.getLocatedBlocks().get(0).getBlock();
    DFSTestUtil.corruptBlock(block, cluster);
    
    RaidDFSUtil.reportCorruptBlocksToNN(dfs, 
        new LocatedBlock[] {blocks.getLocatedBlocks().get(0)});
    
    final FSNamesystem namesystem = cluster.getNameNode().namesystem;
    assertEquals(1, namesystem.getRaidMissingBlocksCount()); // one raid missing block
    assertEquals(0, namesystem.getMissingBlocksCount());  // zero non-raid missing blocks
    
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 36, Source: TestRaidMissingBlocksQueue.java

Example 7: testIdenticalBlockSizeFileDirectory

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
public void testIdenticalBlockSizeFileDirectory() throws Exception {
  mySetup();
  int stripeLength = 4;
  long blockSize = 8192L;
  try {
    for (String code: RaidDFSUtil.codes) {
      LOG.info("testIdenticalBlockSizeFileDirectory: Test code " + code);
      Path sourceDir = new Path("/user/raid");
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {1000L, blockSize, 2*blockSize, 4000L}, blockSize,
          blockSize);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {blockSize, 2*blockSize, 3*blockSize, 4*blockSize},
          blockSize, blockSize);
      int halfBlock = (int)blockSize/2;
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {blockSize + halfBlock, 2*blockSize + halfBlock,
                     3*blockSize + halfBlock, 4*blockSize + halfBlock},
          blockSize, blockSize);
      validateMultipleFiles(code, fileSys, sourceDir, stripeLength,
          new long[] {blockSize+1, 9*blockSize+1, 2*blockSize+1,
          3*blockSize+1}, blockSize, blockSize);
    }
  } finally {
    myTearDown();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 28, Source: TestDirectoryRaidEncoder.java

Example 8: testVerifySourceFile

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
public void testVerifySourceFile() throws Exception {
  mySetup();
  
  try {
    Path srcPath = new Path("/user/dikang/raidtest/file0");
    int numBlocks = 8;
    TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
        1, numBlocks, 8192L);
    assertTrue(fileSys.exists(srcPath));
    Codec codec = Codec.getCodec("rs");
    FileStatus stat = fileSys.getFileStatus(srcPath);
    
    // verify good file
    assertTrue(FastFileCheck.checkFile(conf, (DistributedFileSystem)fileSys, 
        fileSys, srcPath, null, codec, 
        RaidUtils.NULL_PROGRESSABLE, true));
    
    // verify bad file
    LocatedBlocks fileLoc =
        RaidDFSUtil.getBlockLocations((DistributedFileSystem)fileSys, 
            srcPath.toUri().getPath(),
            0, stat.getLen());
    // corrupt file1
    Random rand = new Random();
    int idx = rand.nextInt(numBlocks);
    TestRaidDfs.corruptBlock(srcPath, 
        fileLoc.getLocatedBlocks().get(idx).getBlock(), 
        NUM_DATANODES, true, dfs);
    
    assertFalse(FastFileCheck.checkFile(conf, (DistributedFileSystem)fileSys, 
        fileSys, srcPath, null, codec, 
        RaidUtils.NULL_PROGRESSABLE, true));
  } finally {
    stopCluster();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 37, Source: TestFastFileCheck.java

Example 9: corruptBlocksInFile

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
/**
 * Returns the corrupt blocks in a file.
 */
List<LocatedBlock> corruptBlocksInFile(DistributedFileSystem fs,
                                       String uriPath, FileStatus stat)
  throws IOException {
  List<LocatedBlock> corrupt = new LinkedList<LocatedBlock>();
  LocatedBlocks locatedBlocks =
    RaidDFSUtil.getBlockLocations(fs, uriPath, 0, stat.getLen());
  for (LocatedBlock b: locatedBlocks.getLocatedBlocks()) {
    if (b.isCorrupt() ||
        (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
      corrupt.add(b);
    }
  }
  return corrupt;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 18, Source: BlockFixer.java

Example 10: getCorruptFiles

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
/**
 * @return A list of corrupt files as obtained from the namenode
 */
List<Path> getCorruptFiles() throws IOException {
  DistributedFileSystem dfs = helper.getDFS(new Path("/"));

  String[] files = RaidDFSUtil.getCorruptFiles(dfs);
  List<Path> corruptFiles = new LinkedList<Path>();
  for (String f: files) {
    Path p = new Path(f);
    if (!history.containsKey(p.toString())) {
      corruptFiles.add(p);
    }
  }
  RaidUtils.filterTrash(getConf(), corruptFiles);
  return corruptFiles;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 18, Source: LocalBlockFixer.java

Example 11: testRaidMissingBlocksByTakingDownDataNode

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
/**
 * Take down a datanode to generate raid missing blocks, then bring it
 * back to restore the missing blocks.
 */
@Test
public void testRaidMissingBlocksByTakingDownDataNode() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();
  try {
    cluster = new MiniDFSCluster(conf, 1, true, null);
    final FSNamesystem namesystem = cluster.getNameNode().namesystem;
    final DistributedFileSystem dfs = DFSUtil.convertToDFS(cluster.getFileSystem());
    String filePath = "/test/file1";
    RaidCodec rsCodec = RaidCodec.getCodec("rs");
    RaidDFSUtil.constructFakeRaidFile(dfs, filePath, rsCodec);
    
    DatanodeDescriptor[] datanodes = (DatanodeDescriptor[])
        namesystem.heartbeats.toArray(
            new DatanodeDescriptor[1]);
    assertEquals(1, datanodes.length);
    
    // shutdown the datanode
    DataNodeProperties dnprop = shutdownDataNode(cluster, datanodes[0]);
    assertEquals(rsCodec.numStripeBlocks, namesystem.getRaidMissingBlocksCount());
    assertEquals(0, namesystem.getMissingBlocksCount()); // zero non-raid missing block
    assertEquals(0, namesystem.getNonCorruptUnderReplicatedBlocks());
    
    // bring up the datanode
    cluster.restartDataNode(dnprop);
    
    // Wait for block report
    LOG.info("wait for its block report to come in");
    NumberReplicas num;
    FileStatus stat = dfs.getFileStatus(new Path(filePath));
    LocatedBlocks blocks = dfs.getClient().
        getLocatedBlocks(filePath, 0, stat.getLen());
    long startTime = System.currentTimeMillis();
    do {
      Thread.sleep(1000);
      int totalCount = 0;
      namesystem.readLock();
      try {
        for (LocatedBlock block : blocks.getLocatedBlocks()) {
          num = namesystem.countNodes(block.getBlock());
          totalCount += num.liveReplicas();
        }
        if (totalCount == rsCodec.numDataBlocks) {
          break;
        } else {
          LOG.info("wait for block report, received total replicas: " + totalCount);
        }
      } finally {
        namesystem.readUnlock();
      }
    } while (System.currentTimeMillis() - startTime < 30000);
    assertEquals(0, namesystem.getRaidMissingBlocksCount());
    assertEquals(0, namesystem.getMissingBlocksCount()); // zero non-raid missing block
    assertEquals(0, namesystem.getNonCorruptUnderReplicatedBlocks());

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }   
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 66, Source: TestRaidMissingBlocksQueue.java

Example 12: implParityBlockFix

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
/**
 * Corrupt a parity file and wait for it to get fixed.
 */
private void implParityBlockFix(String testName, boolean local)
  throws Exception {
  LOG.info("Test " + testName + " started.");
  int stripeLength = 3;
  mySetup(stripeLength); 
  long[] crcs = new long[3];
  int[] seeds = new int[3];
  Path dirPath = new Path("/user/dhruba/raidtest");
  Path[] files = TestRaidDfs.createTestFiles(dirPath,
      fileSizes, blockSizes, crcs, seeds, fileSys, (short)1);
  Path destPath = new Path("/destraid/user/dhruba");
  Path parityFile = new Path("/destraid/user/dhruba/raidtest");
  LOG.info("Test " + testName + " created test files");
  Configuration localConf = this.getRaidNodeConfig(conf, local);

  try {
    cnode = RaidNode.createRaidNode(null, localConf);
    TestRaidDfs.waitForDirRaided(LOG, fileSys, dirPath, destPath);
    cnode.stop(); cnode.join();

    long parityCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    FileStatus parityStat = fileSys.getFileStatus(parityFile);
    DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
    LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
      dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
    String[] corruptFiles = DFSUtil.getCorruptFiles(dfs);
    
    assertEquals("no corrupt files expected", 0, corruptFiles.length);
    assertEquals("filesFixed() should return 0 before fixing files",
                 0, cnode.blockIntegrityMonitor.getNumFilesFixed());

    // Corrupt parity blocks for different stripes.
    int[] corruptBlockIdxs = new int[]{0, 1, 2};
    for (int idx: corruptBlockIdxs)
      corruptBlock(locs.get(idx).getBlock(), dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(dfs, parityFile, corruptBlockIdxs,
        2*blockSize);

    corruptFiles = DFSUtil.getCorruptFiles(dfs);
    assertEquals("file not corrupted",
                 1, corruptFiles.length);
    assertEquals("wrong file corrupted",
                 corruptFiles[0], parityFile.toUri().getPath());

    cnode = RaidNode.createRaidNode(null, localConf);
    long start = System.currentTimeMillis();
    while (cnode.blockIntegrityMonitor.getNumFilesFixed() < 1 &&
           System.currentTimeMillis() - start < 120000) {
      LOG.info("Test " + testName + " waiting for files to be fixed.");
      Thread.sleep(3000);
    }
    TestBlockFixer.verifyMetrics(fileSys, cnode, local, 1L, 
        corruptBlockIdxs.length);

    long checkCRC = RaidDFSUtil.getCRC(fileSys, parityFile);

    assertEquals("file not fixed",
                 parityCRC, checkCRC);

  } catch (Exception e) {
    LOG.info("Test " + testName + " Exception " + e +
             StringUtils.stringifyException(e));
    throw e;
  } finally {
    myTearDown();
  }
  LOG.info("Test " + testName + " completed.");
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 73, Source: TestDirectoryBlockFixer.java

Example 13: testMultiplePriorities

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
public void testMultiplePriorities() throws Exception {
  LOG.info("Test testMultiplePriorities started.");
  Path srcFile = new Path("/home/test/file1");
  int repl = 1;
  int numBlocks = 8;
  long blockSize = 16384;
  int stripeLength = 3;
  Path destPath = new Path("/destraidrs");
  mySetup(stripeLength, -1); // never har
  Codec codec = Codec.getCodec("rs");
  LOG.info("Starting testMultiplePriorities");
  try {
    // Create test file and raid it.
    TestRaidDfs.createTestFilePartialLastBlock(
      fileSys, srcFile, repl, numBlocks, blockSize);
    FileStatus stat = fileSys.getFileStatus(srcFile);
    RaidNode.doRaid(conf, stat,
      destPath, codec, new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
      false, repl, repl);

    // Corrupt first block of file.
    int blockIdxToCorrupt = 1;
    LOG.info("Corrupt block " + blockIdxToCorrupt + " of file " + srcFile);
    LocatedBlocks locations = getBlockLocations(srcFile, stat.getLen());
    corruptBlock(locations.get(blockIdxToCorrupt).getBlock(),
        dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(fileSys, srcFile, new int[]{1}, blockSize);

    // Create Block Fixer and fix.
    FakeDistBlockIntegrityMonitor distBlockFixer = new FakeDistBlockIntegrityMonitor(conf);
    assertEquals(0, distBlockFixer.submittedJobs.size());

    // waiting for one job to submit
    long startTime = System.currentTimeMillis();
    while (System.currentTimeMillis() - startTime < 120000 &&
           distBlockFixer.submittedJobs.size() == 0) { 
      distBlockFixer.getCorruptionMonitor().checkAndReconstructBlocks();
      LOG.info("Waiting for jobs to submit");
      Thread.sleep(10000);
    }
    int submittedJob = distBlockFixer.submittedJobs.size();
    LOG.info("Already Submitted " + submittedJob + " jobs");
    assertTrue("Should submit more than 1 jobs", submittedJob >= 1);

    // Corrupt one more block.
    blockIdxToCorrupt = 4;
    LOG.info("Corrupt block " + blockIdxToCorrupt + " of file " + srcFile);
    locations = getBlockLocations(srcFile, stat.getLen());
    corruptBlock(locations.get(blockIdxToCorrupt).getBlock(),
        dfsCluster);
    RaidDFSUtil.reportCorruptBlocks(fileSys, srcFile, new int[]{4}, blockSize);

    // A new job should be submitted since two blocks are corrupt.
    startTime = System.currentTimeMillis();
    while (System.currentTimeMillis() - startTime < 120000 &&
           distBlockFixer.submittedJobs.size() == submittedJob) { 
      distBlockFixer.getCorruptionMonitor().checkAndReconstructBlocks();
      LOG.info("Waiting for more jobs to submit");
      Thread.sleep(10000);
    }
    LOG.info("Already Submitted " + distBlockFixer.submittedJobs.size()  + " jobs");
    assertTrue("Should submit more than 1 jobs",
        distBlockFixer.submittedJobs.size() - submittedJob >= 1);
  } finally {
    myTearDown();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 68, Source: TestBlockFixer.java

Example 14: validateSingleFile

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
private void validateSingleFile(String code, FileSystem fileSys, 
    Path sourceDir, int stripeLength, int blockNum, boolean lastPartial)
        throws Exception {
  LOG.info("Test file with " + blockNum + " blocks and " +
        (lastPartial? "partial": "full") + " last block");
  Codec codec = loadTestCodecs(code, stripeLength, true);
  Path parityDir = new Path(codec.parityDirectory);
  RaidDFSUtil.cleanUp(fileSys, sourceDir);
  RaidDFSUtil.cleanUp(fileSys, parityDir);
  fileSys.mkdirs(sourceDir);
  
  Path file1 = new Path(sourceDir, "file1");
  if (!lastPartial) {
    TestRaidDfs.createTestFile(fileSys, file1, 2, blockNum, 8192L);
  } else {
    TestRaidDfs.createTestFilePartialLastBlock(fileSys, file1, 2,
        blockNum, 8192L);
  }
  Path parityFile = RaidNode.getOriginalParityFile(parityDir, sourceDir);
  // Do directory level raid
  LOG.info("Create a directory-raid parity file " + parityFile);
  assertTrue("Cannot raid directory " + sourceDir, 
      doRaid(conf, fileSys, sourceDir, codec));
  assertEquals("Modification time should be the same", 
      fileSys.getFileStatus(sourceDir).getModificationTime(),
      fileSys.getFileStatus(parityFile).getModificationTime());
  assertEquals("Replica num of source file should be reduced to 1",
      fileSys.getFileStatus(file1).getReplication(), 1);
  assertEquals("Replica num of parity file should be reduced to 1",
      fileSys.getFileStatus(parityFile).getReplication(), 1);
  long dirCRC = RaidDFSUtil.getCRC(fileSys, parityFile);
  long dirLen = fileSys.getFileStatus(parityFile).getLen();
  // remove the parity dir
  RaidDFSUtil.cleanUp(fileSys, parityDir);
  codec = loadTestCodecs(code, stripeLength, false);
  Path parityFile1 = RaidNode.getOriginalParityFile(parityDir,
      file1);
  LOG.info("Create a file-raid parity file " + parityFile1);
  assertTrue("Cannot raid file " + file1, 
      doRaid(conf, fileSys, file1, codec));
  assertTrue("Parity file doesn't match when the file has " + blockNum + 
      " blocks ", 
      TestRaidDfs.validateFile(fileSys, parityFile1, dirLen, dirCRC));
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 45, Source: TestDirectoryRaidEncoder.java

Example 15: testOneFileDirectory

import org.apache.hadoop.hdfs.RaidDFSUtil; // import the required package/class
public void testOneFileDirectory() throws Exception {
  mySetup();
  int stripeLength = 4;
  try {
    for (String code: RaidDFSUtil.codes) {
      LOG.info("testOneFileDirectory: Test code " + code);
      Codec codec = loadTestCodecs(code, stripeLength, true);
      Path sourceDir = new Path("/user/raid", code);
      assertTrue(fileSys.mkdirs(sourceDir));
      Path twoBlockFile = new Path(sourceDir, "twoBlockFile");
      LOG.info("Test one file with 2 blocks");
      TestRaidDfs.createTestFile(fileSys, twoBlockFile, 2, 2, 8192L);
      assertTrue(fileSys.exists(twoBlockFile));
      assertFalse("Not enough blocks in the directory",
          RaidNode.doRaid(conf, fileSys.getFileStatus(sourceDir),
              new Path(codec.parityDirectory), codec,
              new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
              false, 1, 1));
      fileSys.delete(twoBlockFile, true);
      
      LOG.info("Test one file with blocks less than one stripe");
      validateSingleFile(code, fileSys, sourceDir, stripeLength, 3,
          false);
      validateSingleFile(code, fileSys, sourceDir, stripeLength, 3,
          true);
      LOG.info("Test one file with one stripe blocks");
      validateSingleFile(code, fileSys, sourceDir, stripeLength,
          stripeLength, false);
      validateSingleFile(code, fileSys, sourceDir, stripeLength,
          stripeLength, true);
      
      LOG.info("Test one file with more than one stripe blocks");
      validateSingleFile(code, fileSys, sourceDir, stripeLength,
          stripeLength + 2, false);
      validateSingleFile(code, fileSys, sourceDir, stripeLength,
          stripeLength + 2, true);
    }
  } finally {
    myTearDown();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 42, Source: TestDirectoryRaidEncoder.java


Note: The org.apache.hadoop.hdfs.RaidDFSUtil class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation hosting platforms. The code snippets are drawn from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. When redistributing or using the code, please refer to the corresponding project's License; do not repost without permission.