

Java DistributedRaidFileSystem Class Code Examples

This article collects typical usage examples of org.apache.hadoop.hdfs.DistributedRaidFileSystem in Java. If you are wondering what the DistributedRaidFileSystem class does, how to use it, or what real-world usage looks like, the curated class code examples below may help.


The DistributedRaidFileSystem class belongs to the org.apache.hadoop.hdfs package. Fifteen code examples of the class are shown below, sorted by popularity by default.
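Before the individual examples, here is a minimal, self-contained sketch of how a client typically obtains a DistributedRaidFileSystem and unwraps the underlying DistributedFileSystem. The configuration keys are taken verbatim from the getRaidFS() helpers in Examples 6, 7 and 12 below; the class and method names (RaidFsQuickStart, openRaidFs, unwrap) are illustrative only, not part of Hadoop.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedRaidFileSystem;

public class RaidFsQuickStart {
  /** Opens dfsUri through the RAID-aware client wrapper. */
  public static FileSystem openRaidFs(URI dfsUri, Configuration conf)
      throws IOException {
    Configuration clientConf = new Configuration(conf);
    // Route hdfs:// URIs through the RAID wrapper...
    clientConf.set("fs.hdfs.impl",
        "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
    // ...which delegates to a plain DistributedFileSystem underneath.
    clientConf.set("fs.raid.underlyingfs.impl",
        "org.apache.hadoop.hdfs.DistributedFileSystem");
    // Bypass the FileSystem cache so the impl settings above take effect.
    clientConf.setBoolean("fs.hdfs.impl.disable.cache", true);
    return FileSystem.get(dfsUri, clientConf);
  }

  /** Unwraps the underlying file system, as Examples 2, 4 and 14 do. */
  public static FileSystem unwrap(FileSystem fs) {
    if (fs instanceof DistributedRaidFileSystem) {
      return ((DistributedRaidFileSystem) fs).getFileSystem();
    }
    return fs;
  }
}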

Example 1: getSrcStripes

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
/**
 * Helper function of Encoder.
 */
public List<List<Block>> getSrcStripes() {
  List<List<Block>> srcStripes = new ArrayList<List<Block>>();
  for (int i = 0; i < srcStripeList.size(); i++) {
    List<BlockInfo> biList = srcStripeList.get(i);
    List<Block> curSrcStripe = new ArrayList<Block>();
    for (int j = 0; j < biList.size(); j++) {
      int fileIdx = biList.get(j).fileIdx;
      int blockId = biList.get(j).blockId;
      FileStatus curFs = lfs.get(fileIdx);

      try {
        if (fs instanceof DistributedRaidFileSystem) {
          curSrcStripe.add(
              ((DistributedRaidFileSystem)fs).toDistributedFileSystem().getLocatedBlocks(curFs.getPath(),
              0L, curFs.getLen()).get(blockId).getBlock());
        }
      } catch (IOException e) {
        // skip blocks whose locations cannot be fetched
      }
    }
    srcStripes.add(curSrcStripe);
  }
  return srcStripes;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 28, Source: DirectoryStripeReader.java
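Note that this example reaches the underlying DistributedFileSystem through toDistributedFileSystem() in order to call getLocatedBlocks(); most of the later examples unwrap the RAID file system with getFileSystem() instead (see Example 14).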

Example 2: GenThread

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
public GenThread(Configuration conf, Path input, Path output,
    RunTimeConstants rtc) throws IOException {
  this.inputPath = input;
  this.outputPath = output;
  this.fs = FileSystem.newInstance(conf);
  if (fs instanceof DistributedRaidFileSystem) {
    fs = ((DistributedRaidFileSystem)fs).getFileSystem();
  }
  this.buffer = new byte[rtc.buffer_size];
  if (test_buffer_size > rtc.buffer_size) {
    test_buffer_size = rtc.buffer_size;
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:13,代码来源:GenThread.java

Example 3: getEffectiveReplication

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
public double getEffectiveReplication() {
  if (lastRaidStatistics == null) {
    return -1;
  }
  DFSClient dfs;
  double totalPhysical;
  try {
    /* Add by RH start */
    if (FileSystem.get(conf) instanceof DistributedRaidFileSystem) {
      dfs = ((DistributedRaidFileSystem)FileSystem.get(conf)).getClient();
    } else {
      dfs = ((DistributedFileSystem)FileSystem.get(conf)).getClient();
    }
    /* Add by RH end */
    /* Commented by RH start */
    //dfs = ((DistributedFileSystem)FileSystem.get(conf)).getClient();
    /* Commented by RH end */
    totalPhysical = dfs.getNSDiskStatus().getDfsUsed();
  } catch (IOException e) {
    return -1;
  }
  double notRaidedPhysical = totalPhysical;
  double totalLogical = 0;
  for (Codec codec : Codec.getCodecs()) {
    String code = codec.id;
    Statistics st = lastRaidStatistics.get(code);
    totalLogical += st.getSourceCounters(RaidState.RAIDED).getNumLogical();
    notRaidedPhysical -= st.getSourceCounters(RaidState.RAIDED).getNumBytes();
    notRaidedPhysical -= st.getParityCounters().getNumBytes();
  }
  totalLogical += notRaidedPhysical / dfs.getDefaultReplication();
  if (totalLogical == 0) {
    // avoid division by zero
    return -1;
  }
  return totalPhysical / totalLogical;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 38, Source: StatisticsCollector.java
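A note on the arithmetic in this example (our reading of the code, not a statement from the original authors): the method returns totalPhysical / totalLogical, where totalLogical adds the logical bytes of all RAIDed source files to the non-RAIDed physical bytes divided by the default replication factor. The result approximates the number of physical bytes stored per logical byte, i.e. the cluster's effective replication after RAID-ing, with -1 signaling that statistics are unavailable.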

Example 4: purgeParity

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
public void purgeParity(String cmd, String[] args, int startIndex)
    throws IOException {
  if (startIndex + 1 >= args.length) {
    printUsage(cmd);
    throw new IllegalArgumentException("Insufficient arguments");
  }
  Path parityPath = new Path(args[startIndex]);
  AtomicLong entriesProcessed = new AtomicLong(0);
  System.err.println("Starting recursive purge of " + parityPath);

  Codec codec = Codec.getCodec(args[startIndex + 1]);
  FileSystem srcFs = parityPath.getFileSystem(conf);
  if (srcFs instanceof DistributedRaidFileSystem) {
    srcFs = ((DistributedRaidFileSystem)srcFs).getFileSystem();
  }
  FileSystem parityFs = srcFs;
  String parityPrefix = codec.parityDirectory;
  DirectoryTraversal obsoleteParityFileRetriever =
    new DirectoryTraversal(
      "Purge File ",
      java.util.Collections.singletonList(parityPath),
      parityFs,
      new PurgeMonitor.PurgeParityFileFilter(conf, codec, null,
          srcFs, parityFs,
        parityPrefix, null, entriesProcessed),
      1,
      false);
  FileStatus obsolete = null;
  while ((obsolete = obsoleteParityFileRetriever.next()) !=
            DirectoryTraversal.FINISH_TOKEN) {
    PurgeMonitor.performDelete(parityFs, obsolete.getPath(), false);
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 34, Source: RaidShell.java

Example 5: getParityBlocks

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
/**
 * Gets the parity blocks corresponding to the file.
 * Returns the parity blocks themselves in the case of DFS,
 * and the HAR part-file blocks that contain the parity blocks
 * in the case of HAR FS.
 */
private static BlockLocation[] getParityBlocks(final Path filePath,
                                        final long blockSize,
                                        final long numStripes,
                                        final RaidInfo raidInfo) 
  throws IOException {
  FileSystem parityFS = raidInfo.parityPair.getFileSystem();
  
  // get parity file metadata
  FileStatus parityFileStatus = raidInfo.parityPair.getFileStatus(); 
  long parityFileLength = parityFileStatus.getLen();

  if (parityFileLength != numStripes * raidInfo.parityBlocksPerStripe *
      blockSize) {
    throw new IOException("expected parity file of length" + 
                          (numStripes * raidInfo.parityBlocksPerStripe *
                           blockSize) +
                          " but got parity file of length " + 
                          parityFileLength);
  }

  BlockLocation[] parityBlocks = 
    parityFS.getFileBlockLocations(parityFileStatus, 0L, parityFileLength);
  
  if (parityFS instanceof DistributedFileSystem ||
      parityFS instanceof DistributedRaidFileSystem) {
    long parityBlockSize = parityFileStatus.getBlockSize();
    if (parityBlockSize != blockSize) {
      throw new IOException("file block size is " + blockSize + 
                            " but parity file block size is " + 
                            parityBlockSize);
    }
  } else if (parityFS instanceof HarFileSystem) {
    LOG.debug("HAR FS found");
  } else {
    LOG.warn("parity file system is not of a supported type");
  }
  
  return parityBlocks;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 46, Source: RaidUtils.java

Example 6: getRaidFS

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
private DistributedRaidFileSystem getRaidFS() throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
  Configuration clientConf = new Configuration(conf);
  clientConf.set("fs.hdfs.impl", 
      "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
  clientConf.set("fs.raid.underlyingfs.impl", 
      "org.apache.hadoop.hdfs.DistributedFileSystem");
  clientConf.setBoolean("fs.hdfs.impl.disable.cache", true);
  URI dfsUri = dfs.getUri();
  return (DistributedRaidFileSystem)FileSystem.get(dfsUri, clientConf);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 12, Source: TestDirectoryReadConstruction.java

Example 7: getRaidFS

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
private DistributedRaidFileSystem getRaidFS() throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
  Configuration clientConf = new Configuration(conf);
  clientConf.set("fs.hdfs.impl", 
           "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
  clientConf.set("fs.raid.underlyingfs.impl", 
           "org.apache.hadoop.hdfs.DistributedFileSystem");
  clientConf.setBoolean("fs.hdfs.impl.disable.cache", true);
  URI dfsUri = dfs.getUri();
  return (DistributedRaidFileSystem)FileSystem.get(dfsUri, clientConf);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 12, Source: TestParityMovement.java

Example 8: testRenameHar

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
public void testRenameHar() throws Exception {
  try {
    mySetup("xor", 1);
    
    Path[] testPathList = new Path[] {
        new Path("/user/dikang/raidtest/rename/f1"),
        new Path("/user/dikang/raidtest/rename/f2"),
        new Path("/user/dikang/raidtest/rename/f3")};

    Path destHarPath = new Path("/destraid/user/dikang/raidtest/rename");
    
    DistributedRaidFileSystem raidFs = getRaidFS();
    for (Path srcPath : testPathList) {
      TestRaidDfs.createTestFilePartialLastBlock(fileSys, srcPath, 
          1, 8, 8192L);
    }
    
    raidFs.mkdirs(destHarPath);
    raidFs.mkdirs(new Path(destHarPath, "rename" + RaidNode.HAR_SUFFIX));
    
    raidFs.rename(new Path("/user/dikang/raidtest"), 
        new Path("/user/dikang/raidtest1"));
    fail("Expected fail for HAR rename");
  } catch (IOException ie) {
    String message = ie.getMessage();
    assertTrue(message.contains("HAR dir"));
  } finally {
    stopCluster();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 31, Source: TestParityMovement.java

Example 9: testRename

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
public void testRename() throws Exception {
  try {
    
    long[] crcs = new long[3];
    int[] seeds = new int[3];
    short repl = 1;
    Path dirPath = new Path("/user/dikang/raidtest");
    
    mySetup();
    DistributedRaidFileSystem raidFs = getRaidFS();
    Path[] files = TestRaidDfs.createTestFiles(dirPath,
        fileSizes, blockSizes, crcs, seeds, fileSys, (short)1);
    FileStatus stat = raidFs.getFileStatus(dirPath); 
    Codec codec = Codec.getCodec("dir-rs");
    RaidNode.doRaid(conf, stat, new Path(codec.parityDirectory), codec,
        new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
        false, repl, repl);
    
    Path destPath = new Path("/user/dikang/raidtest_new");
    
    assertTrue(raidFs.exists(dirPath));
    assertFalse(raidFs.exists(destPath));
    
    ParityFilePair parity = ParityFilePair.getParityFile(
        codec, stat, conf);
    Path srcParityPath = parity.getPath();
    assertTrue(raidFs.exists(srcParityPath));
    // do the rename file
    assertTrue(raidFs.rename(dirPath, destPath));
    // verify the results.
    assertFalse(raidFs.exists(dirPath));
    assertTrue(raidFs.exists(destPath));
    assertFalse(raidFs.exists(srcParityPath));
    FileStatus srcDest = raidFs.getFileStatus(destPath);
    parity = ParityFilePair.getParityFile(codec, srcDest, conf);
    assertTrue(raidFs.exists(parity.getPath()));
  } finally {
    stopCluster();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 41, Source: TestDirectoryRaidParityMovement.java

Example 10: testDeleteOneFile

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
public void testDeleteOneFile() throws Exception {
  try {
    long[] crcs = new long[3];
    int[] seeds = new int[3];
    short repl = 1;
    Path dirPath = new Path("/user/dikang/raidtest");
    
    mySetup();
    DistributedRaidFileSystem raidFs = getRaidFS();
    Path[] files = TestRaidDfs.createTestFiles(dirPath,
        fileSizes, blockSizes, crcs, seeds, fileSys, (short)1);
    FileStatus stat = raidFs.getFileStatus(dirPath); 
    Codec codec = Codec.getCodec("dir-rs");
    RaidNode.doRaid(conf, stat, new Path(codec.parityDirectory), codec,
        new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
        false, repl, repl);
    
    ParityFilePair parity = ParityFilePair.getParityFile(
        codec, stat, conf);
    Path srcParityPath = parity.getPath();
    assertTrue(raidFs.exists(srcParityPath));
    
    // delete one file
    assertTrue(raidFs.delete(files[0]));
    // verify the results
    assertFalse(raidFs.exists(files[0]));
    // we still have the parity file
    assertTrue(raidFs.exists(srcParityPath));
    
    // delete the left files
    assertTrue(raidFs.delete(files[1]));
    assertTrue(raidFs.delete(files[2]));
    
    // we will not touch the parity file.
    assertTrue(raidFs.exists(srcParityPath));
  } finally {
    stopCluster();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 40, Source: TestDirectoryRaidParityMovement.java

Example 11: testDeleteDirRaidedFile

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
public void testDeleteDirRaidedFile() throws Exception {
  
  long[] crcs = new long[3];
  int[] seeds = new int[3];
  short repl = 1;
  Path dirPath = new Path("/user/dikang/raidtest");
  
  mySetup();
  //disable trash
  conf.setInt("fs.trash.interval", 0);
  
  DistributedRaidFileSystem raidFs = getRaidFS();
  Path[] files = TestRaidDfs.createTestFiles(dirPath,
      fileSizes, blockSizes, crcs, seeds, fileSys, (short)1);
  FileStatus stat = raidFs.getFileStatus(dirPath); 
  Codec codec = Codec.getCodec("dir-rs");
  RaidNode.doRaid(conf, stat, new Path(codec.parityDirectory), codec,
      new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
      false, repl, repl);
  
  try {
    raidFs.delete(files[0]);
    fail();
  } catch (Exception ex) {
    LOG.warn("Excepted error: " + ex.getMessage(), ex);
  } finally {
    stopCluster();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 30, Source: TestDirectoryRaidParityMovement.java

Example 12: getRaidFS

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
static private DistributedRaidFileSystem getRaidFS(FileSystem fileSys,
    Configuration conf)
    throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
  Configuration clientConf = new Configuration(conf);
  clientConf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
  clientConf.set("fs.raid.underlyingfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
  clientConf.setBoolean("fs.hdfs.impl.disable.cache", true);
  URI dfsUri = dfs.getUri();
  return (DistributedRaidFileSystem)FileSystem.get(dfsUri, clientConf);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 12, Source: TestDirectoryRaidDfs.java

Example 13: purgeParity

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
public void purgeParity(String cmd, String[] args, int startIndex)
    throws IOException {
  if (startIndex + 1 >= args.length) {
    printUsage(cmd);
    throw new IllegalArgumentException("Insufficient arguments");
  }
  Path parityPath = new Path(args[startIndex]);
  AtomicLong entriesProcessed = new AtomicLong(0);
  System.err.println("Starting recursive purge of " + parityPath);

  Codec codec = Codec.getCodec(args[startIndex + 1]);
  FileSystem srcFs = parityPath.getFileSystem(conf);
  if (srcFs instanceof DistributedRaidFileSystem) {
    srcFs = ((DistributedRaidFileSystem)srcFs).getFileSystem();
  }
  FileSystem parityFs = srcFs;
  String parityPrefix = codec.parityDirectory;
  DirectoryTraversal obsoleteParityFileRetriever =
    new DirectoryTraversal(
      "Purge File ",
      java.util.Collections.singletonList(parityPath),
      parityFs,
      new PurgeMonitor.PurgeParityFileFilter(conf, codec, srcFs, parityFs,
        parityPrefix, null, entriesProcessed),
      1,
      false);
  FileStatus obsolete = null;
  while ((obsolete = obsoleteParityFileRetriever.next()) !=
            DirectoryTraversal.FINISH_TOKEN) {
    PurgeMonitor.performDelete(parityFs, obsolete.getPath(), false);
  }
}
 
Developer ID: iVCE, Project: RDFS, Lines: 33, Source: RaidShell.java

Example 14: getDFS

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
private static DistributedFileSystem getDFS(FileSystem fs)
  throws IOException {
  if (fs instanceof DistributedRaidFileSystem) {
    fs = ((DistributedRaidFileSystem)fs).getFileSystem();
  }
  return (DistributedFileSystem)fs;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 7, Source: DatanodeBenThread.java
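A typical call site for this helper (illustrative, following the unwrap-then-cast pattern of Example 15): DistributedFileSystem dfs = getDFS(path.getFileSystem(conf));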

Example 15: checkFile

import org.apache.hadoop.hdfs.DistributedRaidFileSystem; // import the required package/class
public void checkFile(String cmd, String[] args, int startIndex)
    throws IOException {
  if (startIndex >= args.length) {
    printUsage(cmd);
    throw new IllegalArgumentException("Insufficient arguments");
  }
  for (int i = startIndex; i < args.length; i++) {
    Path p = new Path(args[i]);
    FileSystem fs = p.getFileSystem(conf);
    // if we got a raid fs, get the underlying fs 
    if (fs instanceof DistributedRaidFileSystem) {
      fs = ((DistributedRaidFileSystem) fs).getFileSystem();
    }
    // We should be able to cast at this point.
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    RemoteIterator<Path> corruptIt = dfs.listCorruptFileBlocks(p);
    int count = 0;
    while (corruptIt.hasNext()) {
      count++;
      Path corruptFile = corruptIt.next();
      // Result of checking.
      String result = null;
      FileStatus stat = fs.getFileStatus(corruptFile);
      if (stat.getReplication() < fs.getDefaultReplication()) {
        RaidInfo raidInfo = RaidUtils.getFileRaidInfo(stat, conf);
        if (raidInfo.codec == null) {
          result = "Below default replication but no parity file found";
        } else {
          boolean notRecoverable = isFileCorrupt(dfs, stat);
          if (notRecoverable) {
            result = "Missing too many blocks to be recovered " + 
              "using parity file " + raidInfo.parityPair.getPath();
          } else {
            result = "Has missing blocks but can be read using parity file " +
              raidInfo.parityPair.getPath();
          }
        }
      } else {
        result = "At default replication, not raided";
      }
      out.println("Result of checking " + corruptFile + " : " +
        result);
    }
    out.println("Found " + count + " files with missing blocks");
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 47, Source: RaidShell.java


Note: The org.apache.hadoop.hdfs.DistributedRaidFileSystem class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For redistribution and use, please refer to the corresponding project's license. Do not reproduce without permission.