

Java RaidUtils Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.raid.RaidUtils. If you are wondering what exactly the RaidUtils class does, how to use it, or where to find it in real code, the curated class examples below should help.


The RaidUtils class belongs to the org.apache.hadoop.raid package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
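Before the individual examples, here is a minimal, self-contained sketch of the RaidUtils members they exercise: RaidUtils.filterTrash(conf, paths), which removes trash paths from a list in place (Examples 1 and 11), and the no-op Progressable implementations passed to RaidNode.doRaid — RaidUtils.NULL_PROGRESSABLE in the hadoop-EAR/RDFS forks and new RaidUtils.DummyProgressable() in mapreduce-fork. The driver class and file paths below are made up for illustration; only the RaidUtils call itself is taken from the examples.

import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.raid.RaidUtils;

// Hypothetical driver class, for illustration only.
public class RaidUtilsFilterTrashSketch {
  public static void main(String[] args) {
    List<Path> files = new LinkedList<Path>();
    files.add(new Path("/user/raid/raidtest/f1"));        // regular file: kept
    files.add(new Path("/user/raid/.Trash/raidtest/f1")); // trash path: removed
    // filterTrash mutates the list in place, dropping paths that match
    // the trash pattern.
    RaidUtils.filterTrash(new Configuration(), files);
    System.out.println("Non-trash files remaining: " + files.size()); // expect 1
  }
}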

Example 1: testTrashFilter

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
/**
 * Test the filtering of trash files from the list of corrupt files.
 */
@Test
public void testTrashFilter() {
  List<Path> files = new LinkedList<Path>();
  // Paths that do not match the trash pattern.
  Path p1 = new Path("/user/raid/raidtest/f1");
  Path p2 = new Path("/user/.Trash/"); 
  // Paths that match the trash pattern.
  Path p3 = new Path("/user/raid/.Trash/raidtest/f1");
  Path p4 = new Path("/user/raid/.Trash/");
  files.add(p1);
  files.add(p3);
  files.add(p4);
  files.add(p2);

  Configuration conf = new Configuration();
  RaidUtils.filterTrash(conf, files);

  assertEquals("expected 2 non-trash files but got " + files.size(),
               2, files.size());
  for (Path p: files) {
    assertTrue("wrong file returned by filterTrash",
               p == p1 || p == p2);
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 28, Source: TestBlockFixer.java

Example 2: corruptBlockAndValidate

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
private void corruptBlockAndValidate(Path srcFile, Path destPath,
  int[] listBlockNumToCorrupt, long blockSize, int numBlocks)
throws IOException, InterruptedException {
  int repl = 1;
  long crc = createTestFilePartialLastBlock(fileSys, srcFile, repl,
                numBlocks, blockSize);
  long length = fileSys.getFileStatus(srcFile).getLen();

  RaidNode.doRaid(conf, fileSys.getFileStatus(srcFile),
    destPath, code, new RaidNode.Statistics(), new RaidUtils.DummyProgressable(),
    false, repl, repl, stripeLength);

  // Corrupt each block listed in listBlockNumToCorrupt
  for (int blockNumToCorrupt : listBlockNumToCorrupt) {
    LOG.info("Corrupt block " + blockNumToCorrupt + " of file " + srcFile);
    LocatedBlocks locations = getBlockLocations(srcFile);
    corruptBlock(dfs, srcFile, locations.get(blockNumToCorrupt).getBlock(),
          NUM_DATANODES, true);
  }

  // Validate
  DistributedRaidFileSystem raidfs = getRaidFS();
  assertTrue(validateFile(raidfs, srcFile, length, crc));
  validateLogFile(getRaidFS(), new Path(LOG_DIR));
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 26, Source: TestRaidDfs.java

Example 3: doRaid

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
private void doRaid(Path srcPath, Codec codec) throws IOException {
  RaidNode.doRaid(conf, fileSys.getFileStatus(srcPath),
      new Path(codec.parityDirectory), codec,
      new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
      false, 1, 1);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 8, Source: TestFastFileCheck.java

Example 4: testVerifySourceFile

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
public void testVerifySourceFile() throws Exception {
  mySetup();
  
  try {
    Path srcPath = new Path("/user/dikang/raidtest/file0");
    int numBlocks = 8;
    TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
        1, numBlocks, 8192L);
    assertTrue(fileSys.exists(srcPath));
    Codec codec = Codec.getCodec("rs");
    FileStatus stat = fileSys.getFileStatus(srcPath);
    
    // verify good file
    assertTrue(FastFileCheck.checkFile(conf, (DistributedFileSystem)fileSys, 
        fileSys, srcPath, null, codec, 
        RaidUtils.NULL_PROGRESSABLE, true));
    
    // verify bad file
    LocatedBlocks fileLoc =
        RaidDFSUtil.getBlockLocations((DistributedFileSystem)fileSys, 
            srcPath.toUri().getPath(),
            0, stat.getLen());
    // corrupt file1
    Random rand = new Random();
    int idx = rand.nextInt(numBlocks);
    TestRaidDfs.corruptBlock(srcPath, 
        fileLoc.getLocatedBlocks().get(idx).getBlock(), 
        NUM_DATANODES, true, dfs);
    
    assertFalse(FastFileCheck.checkFile(conf, (DistributedFileSystem)fileSys, 
        fileSys, srcPath, null, codec, 
        RaidUtils.NULL_PROGRESSABLE, true));
  } finally {
    stopCluster();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 37, Source: TestFastFileCheck.java

Example 5: testVerifyFile

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
public void testVerifyFile() throws Exception {
  mySetup();
  
  try {
    Path srcPath = new Path("/user/dikang/raidtest/file0");
    TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
        1, 8, 8192L);
    
    assertTrue(fileSys.exists(srcPath));
    Codec codec = Codec.getCodec("rs");

    // generate the parity files.
    doRaid(srcPath, codec);
    FileStatus stat = fileSys.getFileStatus(srcPath);
    
    // verify the GOOD_FILE
    ParityFilePair pfPair = ParityFilePair.getParityFile(codec, stat, conf);
    assertNotNull(pfPair);
    assertTrue(FastFileCheck.checkFile(conf, (DistributedFileSystem)fileSys, 
        fileSys, srcPath, pfPair.getPath(), codec, 
        RaidUtils.NULL_PROGRESSABLE, false));
    
    // verify the BAD_FILE
    fileSys.delete(srcPath);
    TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
        1, 8, 8192L);
    fileSys.setTimes(pfPair.getPath(), 
        fileSys.getFileStatus(srcPath).getModificationTime(), -1);
    stat = fileSys.getFileStatus(srcPath);
    pfPair = ParityFilePair.getParityFile(codec, stat, conf);
    assertNotNull(pfPair);
    assertFalse(FastFileCheck.checkFile(conf, (DistributedFileSystem)fileSys, 
        fileSys, srcPath, pfPair.getPath(), codec, 
        RaidUtils.NULL_PROGRESSABLE, false));
  } finally {
    stopCluster();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 39, Source: TestFastFileCheck.java

Example 6: corruptBlockAndValidate

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
private void corruptBlockAndValidate(Path srcFile, Path destPath,
  int[] listBlockNumToCorrupt, long blockSize, int numBlocks,
  MiniDFSCluster cluster)
throws IOException, InterruptedException {
  RaidDFSUtil.cleanUp(fileSys, srcFile.getParent());
  fileSys.mkdirs(srcFile.getParent());
  int repl = 1;
  long crc = createTestFilePartialLastBlock(fileSys, srcFile, repl,
                numBlocks, blockSize);
  long length = fileSys.getFileStatus(srcFile).getLen();

  if (codec.isDirRaid) {
    RaidNode.doRaid(conf, fileSys.getFileStatus(srcFile.getParent()),
        destPath, codec, new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
        false, repl, repl);
  } else {
    RaidNode.doRaid(conf, fileSys.getFileStatus(srcFile),
        destPath, codec, new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
        false, repl, repl);
  }

  // Corrupt each block listed in listBlockNumToCorrupt
  for (int blockNumToCorrupt : listBlockNumToCorrupt) {
    LOG.info("Corrupt block " + blockNumToCorrupt + " of file " + srcFile);
    LocatedBlocks locations = getBlockLocations(srcFile);
    corruptBlock(srcFile, locations.get(blockNumToCorrupt).getBlock(),
          NUM_DATANODES, true, cluster);
  }

  // Validate
  DistributedRaidFileSystem raidfs = getRaidFS();
  assertTrue(validateFile(raidfs, srcFile, length, crc));
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 34, Source: TestRaidDfs.java

Example 7: testTooManyErrorsEncodeCore

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
private void testTooManyErrorsEncodeCore(boolean isDirRaid)
    throws Exception {
  long blockSize = 8192L;
  int numBlocks = 8;
  stripeLength = 3;
  mySetup("xor", 1, isDirRaid);
  // Encoding with XOR should fail when even one block is corrupt.
  try {
    Path destPath = new Path("/destraid/user/dhruba/raidtest");
      Path file = new Path("/user/dhruba/raidtest/file1");
      int repl = 1;
      createTestFilePartialLastBlock(fileSys, file, repl, numBlocks, blockSize);

      int blockNumToCorrupt = 0;
      LOG.info("Corrupt block " + blockNumToCorrupt + " of file " + file);
      LocatedBlocks locations = getBlockLocations(file);
      removeAndReportBlock((DistributedFileSystem)fileSys,
          file, locations.get(blockNumToCorrupt),
          dfs);

    boolean expectedExceptionThrown = false;
    try {
      if (isDirRaid) {
        RaidNode.doRaid(conf, fileSys.getFileStatus(file.getParent()),
            destPath, codec, new RaidNode.Statistics(),
            RaidUtils.NULL_PROGRESSABLE, false, repl, repl);
      } else {
        RaidNode.doRaid(conf, fileSys.getFileStatus(file),
          destPath, codec, new RaidNode.Statistics(),
          RaidUtils.NULL_PROGRESSABLE, false, repl, repl);
      }
    } catch (IOException e) {
      LOG.info("Expected exception caught" + e);
      expectedExceptionThrown = true;
    }
    assertTrue(expectedExceptionThrown);
  } finally {
    myTearDown();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 41, Source: TestRaidDfs.java

Example 8: testTooManyErrorsEncodeRSCore

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
private void testTooManyErrorsEncodeRSCore(boolean isDirRaid) throws Exception {
  long blockSize = 8192L;
  int numBlocks = 8;
  stripeLength = 3;
  mySetup("rs", 1, isDirRaid);
  // Encoding with RS should fail when even one block is corrupt.
  try {
    Path destPath = new Path("/destraidrs/user/dhruba/raidtest");
      Path file = new Path("/user/dhruba/raidtest/file2");
      int repl = 1;
      createTestFilePartialLastBlock(fileSys, file, repl, numBlocks, blockSize);

      int blockNumToCorrupt = 0;
      LOG.info("Corrupt block " + blockNumToCorrupt + " of file " + file);
      LocatedBlocks locations = getBlockLocations(file);
      removeAndReportBlock((DistributedFileSystem)fileSys,
          file, locations.get(blockNumToCorrupt),
          dfs);

    boolean expectedExceptionThrown = false;
    try {
      if (isDirRaid) {
        RaidNode.doRaid(conf, fileSys.getFileStatus(file.getParent()),
            destPath, codec, new RaidNode.Statistics(),
            RaidUtils.NULL_PROGRESSABLE, false, repl, repl);
      } else {
        RaidNode.doRaid(conf, fileSys.getFileStatus(file),
          destPath, codec, new RaidNode.Statistics(),
          RaidUtils.NULL_PROGRESSABLE, false, repl, repl);
      }
    } catch (IOException e) {
      expectedExceptionThrown = true;
      LOG.info("Expected exception caught" + e);
    }
    assertTrue(expectedExceptionThrown);
  } finally {
    myTearDown();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 40, Source: TestRaidDfs.java

Example 9: testTooManyErrorsEncodeCore

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
private void testTooManyErrorsEncodeCore(boolean isDirRaid)
    throws Exception {
  long blockSize = 8192L;
  int numBlocks = 8;
  stripeLength = 3;
  mySetup("xor", 1, isDirRaid);
  // Encoding with XOR should fail when even one block is corrupt.
  try {
    Path destPath = new Path("/destraid/user/dhruba/raidtest");
      Path file = new Path("/user/dhruba/raidtest/file1");
      int repl = 1;
      createTestFilePartialLastBlock(fileSys, file, repl, numBlocks, blockSize);

      int blockNumToCorrupt = 0;
      LOG.info("Corrupt block " + blockNumToCorrupt + " of file " + file);
      LocatedBlocks locations = getBlockLocations(file);
      corruptBlock(file, locations.get(blockNumToCorrupt).getBlock(),
          NUM_DATANODES, true, dfs);

    boolean expectedExceptionThrown = false;
    try {
      if (isDirRaid) {
        RaidNode.doRaid(conf, fileSys.getFileStatus(file.getParent()),
            destPath, codec, new RaidNode.Statistics(),
            RaidUtils.NULL_PROGRESSABLE, false, repl, repl);
      } else {
        RaidNode.doRaid(conf, fileSys.getFileStatus(file),
          destPath, codec, new RaidNode.Statistics(),
          RaidUtils.NULL_PROGRESSABLE, false, repl, repl);
      }
    } catch (IOException e) {
      LOG.info("Expected exception caught" + e);
      expectedExceptionThrown = true;
    }
    assertTrue(expectedExceptionThrown);
  } finally {
    myTearDown();
  }
}
 
Developer: iVCE, Project: RDFS, Lines: 40, Source: TestRaidDfs.java

Example 10: testTooManyErrorsEncodeRSCore

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
private void testTooManyErrorsEncodeRSCore(boolean isDirRaid) throws Exception {
  long blockSize = 8192L;
  int numBlocks = 8;
  stripeLength = 3;
  mySetup("rs", 1, isDirRaid);
  // Encoding with RS should fail when even one block is corrupt.
  try {
    Path destPath = new Path("/destraidrs/user/dhruba/raidtest");
      Path file = new Path("/user/dhruba/raidtest/file2");
      int repl = 1;
      createTestFilePartialLastBlock(fileSys, file, repl, numBlocks, blockSize);

      int blockNumToCorrupt = 0;
      LOG.info("Corrupt block " + blockNumToCorrupt + " of file " + file);
      LocatedBlocks locations = getBlockLocations(file);
      corruptBlock(file, locations.get(blockNumToCorrupt).getBlock(),
          NUM_DATANODES, true, dfs);

    boolean expectedExceptionThrown = false;
    try {
      RaidNode.doRaid(conf, fileSys.getFileStatus(file),
        destPath, codec, new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
        false, repl, repl);
    } catch (IOException e) {
      expectedExceptionThrown = true;
      LOG.info("Expected exception caught" + e);
    }
    assertTrue(expectedExceptionThrown);
  } finally {
    myTearDown();
  }
}
 
Developer: iVCE, Project: RDFS, Lines: 33, Source: TestRaidDfs.java

Example 11: getCorruptFiles

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
/**
 * @return A list of corrupt files as obtained from the namenode
 */
List<Path> getCorruptFiles() throws IOException {
  DistributedFileSystem dfs = helper.getDFS(new Path("/"));

  String[] files = RaidDFSUtil.getCorruptFiles(dfs);
  List<Path> corruptFiles = new LinkedList<Path>();
  for (String f: files) {
    Path p = new Path(f);
    if (!history.containsKey(p.toString())) {
      corruptFiles.add(p);
    }
  }
  RaidUtils.filterTrash(getConf(), corruptFiles);
  return corruptFiles;
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 18, Source: LocalBlockFixer.java

Example 12: testReadFully

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
/**
 * Test DistributedRaidFileSystem.readFully()
 */
public void testReadFully() throws Exception {
  code = ErasureCodeType.XOR;
  stripeLength = 3;
  mySetup("xor", 1);

  try {
    Path file = new Path("/user/raid/raidtest/file1");
    long crc = createTestFile(fileSys, file, 1, 8, 8192L);
    FileStatus stat = fileSys.getFileStatus(file);
    LOG.info("Created " + file + ", crc=" + crc + ", len=" + stat.getLen());

    byte[] filebytes = new byte[(int)stat.getLen()];
    // Test that readFully returns the correct CRC when there are no errors.
    DistributedRaidFileSystem raidfs = getRaidFS();
    FSDataInputStream stm = raidfs.open(file);
    stm.readFully(0, filebytes);
    assertEquals(crc, bufferCRC(filebytes));
    stm.close();

    // Generate parity.
    RaidNode.doRaid(conf, fileSys.getFileStatus(file),
      new Path("/destraid"), code, new RaidNode.Statistics(),
      new RaidUtils.DummyProgressable(),
      false, 1, 1, stripeLength);
    int[] corrupt = {0, 4, 7}; // first, middle and last block
    for (int blockIdx : corrupt) {
      LOG.info("Corrupt block " + blockIdx + " of file " + file);
      LocatedBlocks locations = getBlockLocations(file);
      corruptBlock(dfs, file, locations.get(blockIdx).getBlock(),
          NUM_DATANODES, true);
    }
    // Test that readFully returns the correct CRC when there are errors.
    stm = raidfs.open(file);
    stm.readFully(0, filebytes);
    assertEquals(crc, bufferCRC(filebytes));
  } finally {
    myTearDown();
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 43, Source: TestRaidDfs.java

Example 13: testAccessTime

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
/**
 * Test that access time and mtime of a source file do not change after
 * raiding.
 */
public void testAccessTime() throws Exception {
  LOG.info("Test testAccessTime started.");

  code = ErasureCodeType.XOR;
  long blockSize = 8192L;
  int numBlocks = 8;
  int repl = 1;
  stripeLength = 3;
  mySetup("xor", 1);

  Path file = new Path("/user/dhruba/raidtest/file");
  createTestFilePartialLastBlock(fileSys, file, repl, numBlocks, blockSize);
  FileStatus stat = fileSys.getFileStatus(file);

  try {
    RaidNode.doRaid(conf, fileSys.getFileStatus(file),
      new Path("/destraid"), code, new RaidNode.Statistics(),
      new RaidUtils.DummyProgressable(), false, repl, repl, stripeLength);

    FileStatus newStat = fileSys.getFileStatus(file);

    assertEquals(stat.getModificationTime(), newStat.getModificationTime());
    assertEquals(stat.getAccessTime(), newStat.getAccessTime());
  } finally {
    myTearDown();
  }
}
 
Developer: rekhajoshm, Project: mapreduce-fork, Lines: 32, Source: TestRaidDfs.java

Example 14: verifyFile

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
/**
 * Verify a file at a given block offset.
 */
private static boolean verifyFile(Configuration conf,
    FileSystem srcFs, FileSystem parityFs,
    FileStatus stat, Path parityPath, Codec codec,
    long blockOffset, Progressable reporter) 
  throws IOException, InterruptedException {
  Path srcPath = stat.getPath();
  LOG.info("Verify file: " + srcPath + " at offset: " + blockOffset);
  int limit = (int) Math.min(stat.getBlockSize(), DEFAULT_VERIFY_LEN);
  if (reporter == null) {
    reporter = RaidUtils.NULL_PROGRESSABLE;
  }
  
  // try to decode.
  Decoder decoder = new Decoder(conf, codec);
  if (codec.isDirRaid) {
    decoder.connectToStore(srcPath);
  }
  
  List<Long> errorOffsets = new ArrayList<Long>();
  // first limit bytes
  errorOffsets.add(blockOffset);
  long left = Math.min(stat.getBlockSize(), stat.getLen() - blockOffset);
  if (left > limit) {
    // last limit bytes
    errorOffsets.add(blockOffset + left - limit);
    // random limit bytes.
    errorOffsets.add(blockOffset + 
        rand.nextInt((int)(left - limit)));
  }
 
  byte[] buffer = new byte[limit];
  FSDataInputStream is = srcFs.open(srcPath);
  try {
    for (long errorOffset : errorOffsets) {
      is.seek(errorOffset);
      is.read(buffer);
      // calculate the oldCRC.
      CRC32 oldCrc = new CRC32();
      oldCrc.update(buffer);
      
      CRC32 newCrc = new CRC32();
      DecoderInputStream stream = decoder.new DecoderInputStream(
          RaidUtils.NULL_PROGRESSABLE, limit, stat.getBlockSize(), errorOffset, 
          srcFs, srcPath, parityFs, parityPath, null, null, false);
      try {
        stream.read(buffer);
        newCrc.update(buffer);
        if (oldCrc.getValue() != newCrc.getValue()) {
          LogUtils.logFileCheckMetrics(LOGRESULTS.FAILURE, codec, srcPath, 
              srcFs, errorOffset, limit, null, reporter);
          LOG.error("mismatch crc, old " + oldCrc.getValue() + 
              ", new " + newCrc.getValue() + ", for file: " + srcPath
              + " at offset " + errorOffset + ", read limit " + limit);
          return false;
        }
      } finally {
        reporter.progress();
        if (stream != null) {
          stream.close();
        }
      }
    }
    return true;
  } finally {
    is.close();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 71, Source: FastFileCheck.java

Example 15: testReadFullyCore

import org.apache.hadoop.raid.RaidUtils; // import the required package/class
/**
 * Test DistributedRaidFileSystem.readFully()
 */
private void testReadFullyCore(boolean isDirRaid) throws Exception {
  stripeLength = 3;
  mySetup("xor", 1, isDirRaid);

  try {
    Path file = new Path("/user/raid/raidtest/file1");
    long crc = createTestFile(fileSys, file, 1, 8, 8192L);
    FileStatus stat = fileSys.getFileStatus(file);
    LOG.info("Created " + file + ", crc=" + crc + ", len=" + stat.getLen());

    byte[] filebytes = new byte[(int)stat.getLen()];
    // Test that readFully returns the correct CRC when there are no errors.
    DistributedRaidFileSystem raidfs = getRaidFS();
    FSDataInputStream stm = raidfs.open(file);
    stm.readFully(0, filebytes);
    assertEquals(crc, bufferCRC(filebytes));
    stm.close();
    
    // Generate parity.
    if (isDirRaid) {
      RaidNode.doRaid(conf, fileSys.getFileStatus(file.getParent()),
        new Path("/destraid"), codec, new RaidNode.Statistics(),
        RaidUtils.NULL_PROGRESSABLE,
        false, 1, 1);
    } else {
      RaidNode.doRaid(conf, fileSys.getFileStatus(file),
          new Path("/destraid"), codec, new RaidNode.Statistics(),
          RaidUtils.NULL_PROGRESSABLE,
          false, 1, 1);
    }
    int[] corrupt = {0, 4, 7}; // first, middle and last block
    for (int blockIdx : corrupt) {
      LOG.info("Corrupt block " + blockIdx + " of file " + file);
      LocatedBlocks locations = getBlockLocations(file);
      removeAndReportBlock((DistributedFileSystem)fileSys, 
          file, locations.get(blockIdx), dfs);
    }
    // Test that readFully returns the correct CRC when there are errors.
    stm = raidfs.open(file);
    stm.readFully(0, filebytes);
    assertEquals(crc, bufferCRC(filebytes));
  } finally {
    myTearDown();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 49, Source: TestRaidDfs.java


Note: the org.apache.hadoop.raid.RaidUtils examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. Consult each project's License before distributing or using the code, and do not reproduce this article without permission.