

Java RaidNode Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.raid.RaidNode. If you are wondering what exactly the RaidNode class does, how to use it, or what real usage looks like, the curated class code examples below may help.


The RaidNode class belongs to the org.apache.hadoop.raid package. Fifteen code examples of the class are presented below, sorted by popularity by default.
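Before the individual examples, the snippet below condenses the recurring call pattern on this page into one place. It is a minimal sketch, assuming a Configuration conf, a FileSystem fileSys, and a source Path srcPath are already in scope; those names are illustrative, while the call shape is taken from Examples 13-15 below.

// Minimal sketch (assumptions noted above) of the typical RaidNode call.
Codec codec = Codec.getCodec("rs");                    // codec lookup as in Example 15
RaidNode.doRaid(conf, fileSys.getFileStatus(srcPath),  // file to generate parity for
    new Path(codec.parityDirectory), codec,            // destination of the parity data
    new RaidNode.Statistics(),                         // per-run raid statistics
    RaidUtils.NULL_PROGRESSABLE,                       // no progress reporting
    false, 1, 1);                                      // trailing arguments as in Examples 13-14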

Example 1: searchHarDir

import org.apache.hadoop.raid.RaidNode; // import the required package/class
/**
 * Recursively search a directory tree for HAR-archived parity files.
 */
private boolean searchHarDir(FileStatus stat) 
    throws IOException {
  if (!stat.isDir()) {
    return false;
  }
  String pattern = stat.getPath().toString() + "/*" + RaidNode.HAR_SUFFIX 
      + "*";
  FileStatus[] stats = globStatus(new Path(pattern));
  if (stats != null && stats.length > 0) {
    return true;
  }
    
  stats = fs.listStatus(stat.getPath());

  // No HAR parity at this level; search subdirectories recursively.
  for (FileStatus status : stats) {
    if (searchHarDir(status)) {
      return true;
    }
  }
  return false;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 26, Source: DistributedRaidFileSystem.java

Example 2: testFileBlockMissing

import org.apache.hadoop.raid.RaidNode; // import the required package/class
/**
 * Checks fsck when all blocks are missing from the source files but not
 * from the parity files. Because the parity stripe length is 3, none of
 * the files becomes unrecoverable.
 */
@Test
public void testFileBlockMissing() throws Exception {
  LOG.info("testFileBlockMissing");
  int rsParityLength = 3;
  setUpCluster(rsParityLength);
  TestRaidShellFsck.waitUntilCorruptFileCount(dfs, 0);
  LOG.info("Corrupt all blocks in all source files");
  for (int i = 0; i < files.length; i++) {
    long blockNum = RaidNode.getNumBlocks(srcStats[i]);
    for (int j = 0; j < blockNum; j++) {
      removeAndReportBlock(cluster, srcStats[i], new int[]{j});
    }
  }
  TestRaidShellFsck.waitUntilCorruptFileCount(dfs, files.length);
  assertEquals(0, ToolRunner.run(shell, args));
  int result = shell.getCorruptCount();
  assertEquals("fsck should return 0", 0, result);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 23, Source: TestDirectoryRaidShellFsck.java

Example 3: testParityBlockMissing

import org.apache.hadoop.raid.RaidNode; // import the required package/class
/**
 * Checks fsck when all blocks are missing from the parity file but not
 * from the source files. Raid fsck skips all parity files, so the
 * corrupt count should remain zero.
 */
@Test
public void testParityBlockMissing() throws Exception {
  LOG.info("testParityBlockMissing");
  int rsParityLength = 3;
  setUpCluster(rsParityLength);
  TestRaidShellFsck.waitUntilCorruptFileCount(dfs, 0);
  long blockNum = RaidNode.getNumBlocks(parityStat);
  LOG.info("Corrupt all blocks in parity file");
  for (int i = 0; i < blockNum; i++) {
    removeAndReportBlock(cluster, parityStat, new int[]{i});
  }
  TestRaidShellFsck.waitUntilCorruptFileCount(dfs, 1);

  assertEquals(0, ToolRunner.run(shell, args));
  int result = shell.getCorruptCount();

  assertEquals("fsck should return 0", 0, result);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 23, Source: TestDirectoryRaidShellFsck.java

Example 4: testFileBlockMissing

import org.apache.hadoop.raid.RaidNode; // import the required package/class
/**
 * Checks fsck when all blocks are missing from the source files but not
 * from the parity files. Because the parity stripe length is 3, none of
 * the files becomes unrecoverable.
 */
@Test
public void testFileBlockMissing() throws Exception {
  LOG.info("testFileBlockMissing");
  int rsParityLength = 3;
  setUpCluster(rsParityLength);
  TestRaidShellFsck.waitUntilCorruptFileCount(dfs, 0);
  LOG.info("Corrupt all blocks in all source files");
  for (int i = 0; i < files.length; i++) {
    long blockNum = RaidNode.getNumBlocks(srcStats[i]);
    for (int j = 0; j < blockNum; j++) {
      removeAndReportBlock(cluster, srcStats[i], new int[]{j});
    }
  }
  TestRaidShellFsck.waitUntilCorruptFileCount(dfs, files.length);
  ToolRunner.run(shell, args);
  int result = shell.getCorruptCount();
  assertEquals("fsck should return 0", 0, result);
}
 
Developer ID: iVCE, Project: RDFS, Lines: 23, Source: TestDirectoryRaidShellFsck.java

Example 5: testParityBlockMissing

import org.apache.hadoop.raid.RaidNode; // import the required package/class
/**
 * Checks fsck when all blocks are missing from the parity file but not
 * from the source files. Raid fsck skips all parity files, so the
 * corrupt count should remain zero.
 */
@Test
public void testParityBlockMissing() throws Exception {
  LOG.info("testParityBlockMissing");
  int rsParityLength = 3;
  setUpCluster(rsParityLength);
  TestRaidShellFsck.waitUntilCorruptFileCount(dfs, 0);
  long blockNum = RaidNode.getNumBlocks(parityStat);
  LOG.info("Corrupt all blocks in parity file");
  for (int i = 0; i < blockNum; i++) {
    removeAndReportBlock(cluster, parityStat, new int[]{i});
  }
  TestRaidShellFsck.waitUntilCorruptFileCount(dfs, 1);

  ToolRunner.run(shell, args);
  int result = shell.getCorruptCount();

  assertEquals("fsck should return 0", 0, result);
}
 
Developer ID: iVCE, Project: RDFS, Lines: 23, Source: TestDirectoryRaidShellFsck.java

Example 6: filterUnfixableSourceFiles

import org.apache.hadoop.raid.RaidNode; // import the required package/class
void filterUnfixableSourceFiles(Iterator<Path> it) throws IOException {
  String xorPrefix = RaidNode.xorDestinationPath(getConf()).toUri().getPath();
  if (!xorPrefix.endsWith(Path.SEPARATOR)) {
    xorPrefix += Path.SEPARATOR;
  }
  String rsPrefix = RaidNode.rsDestinationPath(getConf()).toUri().getPath();
  if (!rsPrefix.endsWith(Path.SEPARATOR)) {
    rsPrefix += Path.SEPARATOR;
  }
  String[] destPrefixes = new String[]{xorPrefix, rsPrefix};
  while (it.hasNext()) {
    Path p = it.next();
    // A source file with neither an XOR nor an RS parity file cannot
    // be fixed, so drop it from the iterator.
    if (isSourceFile(p, destPrefixes) &&
        RaidNode.xorParityForSource(p, getConf()) == null &&
        RaidNode.rsParityForSource(p, getConf()) == null) {
      it.remove();
    }
  }
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines: 20, Source: BlockFixer.java
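For context, a hypothetical call site for this method might look as follows; the worklist paths are assumptions for illustration, and the call is assumed to run inside a BlockFixer-like instance where the method is visible.

// Hypothetical usage sketch: trim a worklist of corrupt source files so
// that only entries with an existing XOR or RS parity file remain.
List<Path> corruptFiles = new ArrayList<Path>();
corruptFiles.add(new Path("/user/foo/part-00000"));    // assumed path
corruptFiles.add(new Path("/user/foo/part-00001"));    // assumed path
filterUnfixableSourceFiles(corruptFiles.iterator());   // removes unfixable entries in place
// corruptFiles now holds only files that can still be fixed from parity.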

Example 7: BlockFixerHelper

import org.apache.hadoop.raid.RaidNode; // import the required package/class
public BlockFixerHelper(Configuration conf) throws IOException {
  super(conf);

  xorPrefix = RaidNode.xorDestinationPath(getConf()).toUri().getPath();
  if (!xorPrefix.endsWith(Path.SEPARATOR)) {
    xorPrefix += Path.SEPARATOR;
  }
  rsPrefix = RaidNode.rsDestinationPath(getConf()).toUri().getPath();
  if (!rsPrefix.endsWith(Path.SEPARATOR)) {
    rsPrefix += Path.SEPARATOR;
  }
  int stripeLength = RaidNode.getStripeLength(getConf());
  xorEncoder = new XOREncoder(getConf(), stripeLength);
  xorDecoder = new XORDecoder(getConf(), stripeLength);
  int parityLength = RaidNode.rsParityLength(getConf());
  rsEncoder = new ReedSolomonEncoder(getConf(), stripeLength, parityLength);
  rsDecoder = new ReedSolomonDecoder(getConf(), stripeLength, parityLength);
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines: 20, Source: BlockFixer.java

Example 8: corruptBlockAndValidate

import org.apache.hadoop.raid.RaidNode; // import the required package/class
private void corruptBlockAndValidate(Path srcFile, Path destPath,
    int[] listBlockNumToCorrupt, long blockSize, int numBlocks)
    throws IOException, InterruptedException {
  int repl = 1;
  long crc = createTestFilePartialLastBlock(fileSys, srcFile, repl,
                numBlocks, blockSize);
  long length = fileSys.getFileStatus(srcFile).getLen();

  RaidNode.doRaid(conf, fileSys.getFileStatus(srcFile),
    destPath, code, new RaidNode.Statistics(), new RaidUtils.DummyProgressable(),
    false, repl, repl, stripeLength);

  // Remove each of the requested blocks from the source file
  for (int blockNumToCorrupt : listBlockNumToCorrupt) {
    LOG.info("Corrupt block " + blockNumToCorrupt + " of file " + srcFile);
    LocatedBlocks locations = getBlockLocations(srcFile);
    corruptBlock(dfs, srcFile, locations.get(blockNumToCorrupt).getBlock(),
          NUM_DATANODES, true);
  }

  // Validate
  DistributedRaidFileSystem raidfs = getRaidFS();
  assertTrue(validateFile(raidfs, srcFile, length, crc));
  validateLogFile(getRaidFS(), new Path(LOG_DIR));
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines: 26, Source: TestRaidDfs.java

Example 9: setupCluster

import org.apache.hadoop.raid.RaidNode; // import the required package/class
protected void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set("dfs.replication.pending.timeout.sec", "2");
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1L);
  conf.set("dfs.block.replicator.classname",
           "org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid");
  conf.set(RaidNode.STRIPE_LENGTH_KEY, "2");
  conf.set(RaidNode.RS_PARITY_LENGTH_KEY, "3");
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
  // start the cluster with one datanode first
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).
      format(true).racks(rack1).hosts(host1).build();
  cluster.waitActive();
  namesystem = cluster.getNameNode().getNamesystem();
  Assert.assertTrue("BlockPlacementPolicy type is not correct.",
    namesystem.blockManager.replicator instanceof BlockPlacementPolicyRaid);
  policy = (BlockPlacementPolicyRaid) namesystem.blockManager.replicator;
  fs = cluster.getFileSystem();
  xorPrefix = RaidNode.xorDestinationPath(conf).toUri().getPath();
  raidTempPrefix = RaidNode.xorTempPrefix(conf);
  raidrsTempPrefix = RaidNode.rsTempPrefix(conf);
  raidrsHarTempPrefix = RaidNode.rsHarTempPrefix(conf);
}
 
Developer ID: rekhajoshm, Project: mapreduce-fork, Lines: 25, Source: TestBlockPlacementPolicyRaid.java

Example 10: ParityVerifier

import org.apache.hadoop.raid.RaidNode; // import the required package/class
public ParityVerifier(Configuration conf, boolean restoreReplication,
    int replicationLimit, Codec code) {
  this.code = code;
  this.conf = conf;
  this.directoryTraversalShuffle =
      conf.getBoolean(RaidNode.RAID_DIRECTORYTRAVERSAL_SHUFFLE, true);
  this.directoryTraversalThreads =
      conf.getInt(RaidNode.RAID_DIRECTORYTRAVERSAL_THREADS, 4);
  this.replicationLimit = (short)replicationLimit; 
  this.restoreReplication = restoreReplication;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 12, Source: ParityVerifier.java
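As a usage note, a hypothetical instantiation could look like the following; the codec lookup mirrors Codec.getCodec("rs") from Example 15, while the boolean flag and the limit value are illustrative assumptions.

// Hypothetical usage sketch: verify parity and restore replication,
// capping replication at 3, for files raided with the "rs" codec.
ParityVerifier verifier =
    new ParityVerifier(conf, true, 3, Codec.getCodec("rs"));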

Example 11: check

import org.apache.hadoop.raid.RaidNode; // import the required package/class
@Override
public boolean check(FileStatus parityStat) throws IOException {
  if (parityStat.isDir()) return false;
  Path parityPath = parityStat.getPath();
  FileSystem fs = parityPath.getFileSystem(conf);
  String parityPathStr = parityPath.toUri().getPath();
  String src = parityPathStr.replaceFirst(code.getParityPrefix(),
      Path.SEPARATOR);
  Path srcPath = new Path(src);
  FileStatus srcStat;
  try {
    srcStat = fs.getFileStatus(srcPath);
  } catch (FileNotFoundException ioe) {
    return false;
  }
  if (!code.isDirRaid) {
    return checkSrc(srcStat, (short)limit, restoreReplication, fs);
  } else {
    List<FileStatus> stats = RaidNode.listDirectoryRaidFileStatus(conf,
        fs, srcPath);
    if (stats == null || stats.size() == 0) {
      return false;
    }
    boolean result = false;
    for (FileStatus stat : stats) {
      if (checkSrc(stat, (short)limit, restoreReplication, fs)) {
        result = true;
      }
    }
    return result;
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 33, Source: ParityVerifier.java

Example 12: initialize

import org.apache.hadoop.raid.RaidNode; // import the required package/class
/** {@inheritDoc} */
@Override
public void initialize(Configuration conf, FSClusterStats stats,
                       NetworkTopology clusterMap, HostsFileReader hostsReader,
                       DNSToSwitchMapping dnsToSwitchMapping, FSNamesystem namesystem) {
  super.initialize(conf, stats, clusterMap, 
                   hostsReader, dnsToSwitchMapping, namesystem);
  this.conf = conf;
  this.minFileSize = conf.getLong(RaidNode.MINIMUM_RAIDABLE_FILESIZE_KEY,
      RaidNode.MINIMUM_RAIDABLE_FILESIZE);
  this.namesystem = namesystem;
  this.cachedLocatedBlocks = new CachedLocatedBlocks(conf);
  this.cachedFullPathNames = new CachedFullPathNames(conf);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 15, Source: BlockPlacementPolicyRaid.java

Example 13: doRaid

import org.apache.hadoop.raid.RaidNode; // import the required package/class
private void doRaid(Path srcPath, Codec codec) throws IOException {
  RaidNode.doRaid(conf, fileSys.getFileStatus(srcPath),
      new Path("/raid"), codec,
      new RaidNode.Statistics(),
      RaidUtils.NULL_PROGRESSABLE,
      false, 1, 1);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 8, Source: TestRaidShell.java

Example 14: doRaid

import org.apache.hadoop.raid.RaidNode; // import the required package/class
private void doRaid(Path srcPath, Codec codec) throws IOException {
  RaidNode.doRaid(conf, fileSys.getFileStatus(srcPath),
      new Path(codec.parityDirectory), codec,
      new RaidNode.Statistics(),
      RaidUtils.NULL_PROGRESSABLE,
      false, 1, 1);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 8, Source: TestFastFileCheck.java

Example 15: setUpCluster

import org.apache.hadoop.raid.RaidNode; // import the required package/class
/**
 * Creates a MiniDFS cluster with raided files in it.
 */
public void setUpCluster(int rsParityLength, long[] fileSizes,
    long[] blockSizes) throws IOException, ClassNotFoundException {
  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  Utils.loadTestCodecs(conf, STRIPE_BLOCKS, STRIPE_BLOCKS, 1, rsParityLength,
      "/destraid", "/destraidrs", false, true);
  conf.setBoolean("dfs.permissions", false);
  cluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  cluster.waitActive();
  dfs = (DistributedFileSystem) cluster.getFileSystem();
  TestDirectoryRaidDfs.setupStripeStore(conf, dfs);    
  String namenode = dfs.getUri().toString();
  FileSystem.setDefaultUri(conf, namenode);
  Codec dirRS = Codec.getCodec("rs");
  long[] crcs = new long[fileSizes.length];
  int[] seeds = new int[fileSizes.length];
  files = TestRaidDfs.createTestFiles(srcDir, fileSizes,
    blockSizes, crcs, seeds, (FileSystem)dfs, (short)1);
  assertTrue(RaidNode.doRaid(conf, dfs.getFileStatus(srcDir),
    new Path(dirRS.parityDirectory), dirRS,
    new RaidNode.Statistics(),
    RaidUtils.NULL_PROGRESSABLE,
    false, 1, 1));
  srcStats = new FileStatus[files.length];
  for (int i = 0 ; i < files.length; i++) {
    srcStats[i] = dfs.getFileStatus(files[i]);
  }
  parityStat = dfs.getFileStatus(parityFile);
  clientConf = new Configuration(conf);
  clientConf.set("fs.hdfs.impl",
                 "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
  clientConf.set("fs.raid.underlyingfs.impl",
                 "org.apache.hadoop.hdfs.DistributedFileSystem");
  // prepare shell and arguments
  shell = new RaidShell(clientConf);
  args = new String[2];
  args[0] = "-fsck";
  args[1] = "/";
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 43, Source: TestDirectoryRaidShellFsck.java


Note: The org.apache.hadoop.raid.RaidNode class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Refer to the corresponding project's license before redistributing or using the code; do not repost without permission.