

Java MiniDFSCluster.getFinalizedDir Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.getFinalizedDir. If you are wondering what MiniDFSCluster.getFinalizedDir does, how to use it, or are looking for usage examples, the selected code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.MiniDFSCluster.


The sections below present 5 code examples of the MiniDFSCluster.getFinalizedDir method, ordered by popularity by default.
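Before the examples, here is a minimal, self-contained sketch of the pattern most of these tests follow: start a MiniDFSCluster, write a small file so that finalized block replicas exist, then resolve the finalized-replica directory for a given datanode storage directory and block pool ID. The class name, file path, and file size below are illustrative assumptions; only the MiniDFSCluster, DFSTestUtil, and FileSystem calls mirror those used in the examples that follow.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GetFinalizedDirSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster = null;
    try {
      // start a mini cluster with default configuration (one datanode)
      cluster = new MiniDFSCluster.Builder(new Configuration()).build();
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // write a small file so at least one finalized replica exists
      DFSTestUtil.createFile(fs, new Path("/demo.txt"), 1024L, (short) 1, 0L);

      // resolve the finalized-replica directory for datanode 0, storage dir 0
      String bpid = cluster.getNamesystem().getBlockPoolId();
      File storageDir = cluster.getInstanceStorageDir(0, 0);
      File finalizedDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      System.out.println("Finalized dir: " + finalizedDir.getAbsolutePath());
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}

In the real test classes this finalized directory is typically scanned with MiniDFSCluster.getAllBlockMetadataFiles so that block and metadata files can be located and deleted, which is how the examples below simulate missing or corrupt replicas.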

Example 1: corruptBlock

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
private void corruptBlock(MiniDFSCluster cluster, FileSystem fs, final Path fileName,
    int dnIndex, ExtendedBlock block) throws IOException {
  // corrupt the block replica on datanode dnIndex;
  // the datanode indexes change once the nodes are restarted,
  // but the data directories do not change
  assertTrue(cluster.corruptReplica(dnIndex, block));

  DataNodeProperties dnProps = cluster.stopDataNode(0);

  // Each datanode has multiple data dirs, check each
  for (int dirIndex = 0; dirIndex < 2; dirIndex++) {
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = cluster.getStorageDir(dnIndex, dirIndex);
    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    File scanLogFile = new File(dataDir, "dncp_block_verification.log.curr");
    if (scanLogFile.exists()) {
      // wait for one minute for deletion to succeed;
      for (int i = 0; !scanLogFile.delete(); i++) {
        assertTrue("Could not delete log file in one minute", i < 60);
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ignored) {
        }
      }
    }
  }

  // restart the datanode so the corrupt replica will be detected
  cluster.restartDataNode(dnProps);
}
 
Developer ID: naver; Project: hadoop; Lines of code: 31; Source file: TestProcessCorruptBlocks.java

Example 2: testlistCorruptFileBlocksDFS

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * test listCorruptFileBlocks in DistributedFileSystem
 */
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).
        setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    RemoteIterator<Path> corruptFileBlocks = 
      dfs.listCorruptFileBlocks(new Path("/corruptData"));
    int numCorrupt = countPaths(corruptFileBlocks);
    assertTrue(numCorrupt == 0);
    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // For loop through number of datadirectories per datanode (2)
    for (int i = 0; i < 2; i++) {
      File storageDir = cluster.getInstanceStorageDir(0, i);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
          data_dir);
      if (metadataFiles == null)
        continue;
      // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
      // (blocks.length > 0));
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        LOG.info("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
        LOG.info("Deliberately removing file " + metadataFile.getName());
        assertTrue("Cannot remove file.", metadataFile.delete());
        // break;
      }
    }

    int count = 0;
    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    numCorrupt = countPaths(corruptFileBlocks);
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 72; Source file: TestListCorruptFileBlocks.java

Example 3: testFsckListCorruptFilesBlocks

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/** check if the fsck option -list-corruptfileblocks works properly */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs,"/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 68; Source file: TestFsck.java

Example 4: testVolumeFailure

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testVolumeFailure() throws Exception {
  System.out.println("Data dir: is " +  dataDir.getPath());
 
  
  // Data dir structure is dataDir/data[1-4]/[current,tmp...]
  // data1,2 belong to datanode 1; data3,4 belong to datanode 2
  String filename = "/test.txt";
  Path filePath = new Path(filename);
  
  // we use only a small number of blocks to avoid creating subdirs in the data dir
  int filesize = block_size*blocks_num;
  DFSTestUtil.createFile(fs, filePath, filesize, repl, 1L);
  DFSTestUtil.waitReplication(fs, filePath, repl);
  System.out.println("file " + filename + "(size " +
      filesize + ") is created and replicated");
 
  // fail the volume
  // delete/make non-writable one of the directories (failed volume)
  data_fail = new File(dataDir, "data3");
  failedDir = MiniDFSCluster.getFinalizedDir(dataDir, 
      cluster.getNamesystem().getBlockPoolId());
  if (failedDir.exists() &&
      //!FileUtil.fullyDelete(failedDir)
      !deteteBlocks(failedDir)
      ) {
    throw new IOException("Could not delete hdfs directory '" + failedDir + "'");
  }
  data_fail.setReadOnly();
  failedDir.setReadOnly();
  System.out.println("Deleteing " + failedDir.getPath() + "; exist=" + failedDir.exists());
  
  // access all the blocks on the "failed" DataNode, 
  // we need to make sure that the "failed" volume is being accessed - 
  // and that will cause failure, blocks removal, "emergency" block report
  triggerFailure(filename, filesize);
  
  // make sure a block report is sent 
  DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
  String bpid = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
  
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];

  int reportIndex = 0;
  for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
      DatanodeStorage dnStorage = kvPair.getKey();
      BlockListAsLongs blockList = kvPair.getValue();
      reports[reportIndex++] =
          new StorageBlockReport(dnStorage, blockList);
  }
  
  cluster.getNameNodeRpc().blockReport(dnR, bpid, reports, null);

  // verify number of blocks and files...
  verify(filename, filesize);
  
  // create another file (with one volume failed).
  System.out.println("creating file test1.txt");
  Path fileName1 = new Path("/test1.txt");
  DFSTestUtil.createFile(fs, fileName1, filesize, repl, 1L);
  
  // should be able to replicate to both nodes (2 DN, repl=2)
  DFSTestUtil.waitReplication(fs, fileName1, repl);
  System.out.println("file " + fileName1.getName() + 
      " is created and replicated");
}
 
Developer ID: naver; Project: hadoop; Lines of code: 73; Source file: TestDataNodeVolumeFailure.java

Example 5: countRealBlocks

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Look for real blocks by counting *.meta files in all the storage dirs.
 * @param map map from block id to its discovered locations; updated in place
 * @return total number of block metadata files found
 */
private int countRealBlocks(Map<String, BlockLocs> map) {
  int total = 0;
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  for(int i=0; i<dn_num; i++) {
    for(int j=0; j<=1; j++) {
      File storageDir = cluster.getInstanceStorageDir(i, j);
      File dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      if(dir == null) {
        System.out.println("dir is null for dn=" + i + " and data_dir=" + j);
        continue;
      }
    
      List<File> res = MiniDFSCluster.getAllBlockMetadataFiles(dir);
      if(res == null) {
        System.out.println("res is null for dir = " + dir + " i=" + i + " and j=" + j);
        continue;
      }
      //System.out.println("for dn" + i + "." + j + ": " + dir + "=" + res.length+ " files");
    
      //int ii = 0;
      for(File f: res) {
        String s = f.getName();
        // cut off "blk_-" at the beginning and ".meta" at the end
        assertNotNull("Block file name should not be null", s);
        String bid = s.substring(s.indexOf("_")+1, s.lastIndexOf("_"));
        //System.out.println(ii++ + ". block " + s + "; id=" + bid);
        BlockLocs val = map.get(bid);
        if(val == null) {
          val = new BlockLocs();
        }
        val.num_files ++; // one more file for the block
        map.put(bid, val);

      }
      //System.out.println("dir1="+dir.getPath() + "blocks=" + res.length);
      //System.out.println("dir2="+dir2.getPath() + "blocks=" + res2.length);

      total += res.size();
    }
  }
  return total;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 49; Source file: TestDataNodeVolumeFailure.java


Note: The org.apache.hadoop.hdfs.MiniDFSCluster.getFinalizedDir method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.