

Java CorruptFileBlocks.getFiles Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.CorruptFileBlocks.getFiles, drawn from open-source projects. If you are unsure what CorruptFileBlocks.getFiles does or how to call it, the curated examples below should help. You can also explore other usages of the enclosing class, org.apache.hadoop.hdfs.protocol.CorruptFileBlocks.


Below are 5 code examples of the CorruptFileBlocks.getFiles method, ordered by popularity.
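Before the examples, here is a minimal sketch of the core pattern they all share: listCorruptFileBlocks returns one batch of corrupt file paths via getFiles(), plus a cookie via getCookie() that resumes the listing on the next call. The helper name listAllCorruptFiles and the pre-connected DFSClient are illustrative assumptions, not code from any of the projects below.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;

// Collect every corrupt file path under 'path' by following the
// pagination cookie until the namenode returns an empty batch.
// (Sketch; assumes 'dfs' is an already-connected DFSClient.)
static List<String> listAllCorruptFiles(DFSClient dfs, String path)
    throws IOException {
  List<String> result = new ArrayList<>();
  String cookie = null;                      // null requests the first batch
  while (true) {
    CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
    String[] files = cfb.getFiles();         // one batch of corrupt file paths
    if (files.length == 0) {
      break;                                 // empty batch: listing is done
    }
    for (String file : files) {
      result.add(file);
    }
    cookie = cfb.getCookie();                // resume token for the next call
  }
  return result;
}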

Example 1: loadNext

import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; // import the package/class the method depends on
private void loadNext() throws IOException {
  if (files == null || fileIdx >= files.length) {
    CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
    files = cfb.getFiles();
    cookie = cfb.getCookie();
    fileIdx = 0;
    callsMade++;
  }

  if (fileIdx >= files.length) {
    // received an empty response
    // there are no more corrupt file blocks
    nextPath = null;
  } else {
    nextPath = string2Path(files[fileIdx]);
    fileIdx++;
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: CorruptFileBlockIterator.java
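As a usage note: loadNext is the refill step of CorruptFileBlockIterator, which backs the RemoteIterator<Path> returned by DistributedFileSystem.listCorruptFileBlocks. A typical consumer, assuming an already-obtained DistributedFileSystem named fs, looks like this sketch:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

// Iterate lazily over corrupt files; each hasNext()/next() pair may
// trigger loadNext() above to fetch the next batch from the namenode.
RemoteIterator<Path> it = fs.listCorruptFileBlocks(new Path("/corruptData"));
while (it.hasNext()) {
  System.out.println("corrupt file: " + it.next());
}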

Example 2: testFsckListCorruptFilesBlocks

import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; // import the package/class the method depends on
/** Check that the -list-corruptfileblocks option of the fsck command works properly. */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs,"/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Developer: naver, Project: hadoop, Lines: 68, Source: TestFsck.java
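One caveat in the test above: the while (numCorrupt == 0) loop polls forever if the namenode never reports the corruption, relying on an external test timeout. A bounded variant is sketched below; the 60-second budget and the assertion message are illustrative assumptions, not part of the original test:

// Poll for corruption with an explicit deadline instead of an
// unbounded loop (sketch; reuses the 'namenode' handle from the test).
int numCorrupt = 0;
long deadline = System.currentTimeMillis() + 60 * 1000;   // 60 s budget
while (numCorrupt == 0 && System.currentTimeMillis() < deadline) {
  Thread.sleep(1000);
  numCorrupt = namenode
      .listCorruptFileBlocks("/corruptData", null).getFiles().length;
}
assertTrue("namenode never reported corrupt files", numCorrupt > 0);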

Example 3: testFsckListCorruptFilesBlocks

import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; // import the package/class the method depends on
/** Check that the -list-corruptfileblocks option of the fsck command works properly. */
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong("dfs.blockreport.intervalMsec", 1000);
  conf.setInt("dfs.datanode.directoryscan.interval", 1);
  conf.setBoolean("dfs.permissions", false);
  FileSystem fs = null;
  
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster(conf, 1, true, null);
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);
    
    // String outStr = runFsck(conf, 0, true, "/corruptData",
    // "-list-corruptfileblocks");
    String outStr = runFsck(conf, 0, false, "/corruptData",
                            "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    for (int i=0; i<8; i++) {
      File data_dir = cluster.getBlockDirectory("data" + (i + 1));
      File[] blocks = data_dir.listFiles();
      if (blocks == null)
        continue;
      
      for (int idx = 0; idx < blocks.length; idx++) {
        if (!blocks[idx].getName().startsWith("blk_")) {
          continue;
        }
        assertTrue("Cannot remove file.", blocks[idx].delete());
      }
    }
    
    // wait for the namenode to see the corruption
    final NameNode namenode = cluster.getNameNode();
    CorruptFileBlocks corruptFileBlocks = namenode
      .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData",
                     "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));
    
    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs,"/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 66, Source: TestFsck.java

Example 4: testFsckListCorruptFilesBlocks

import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; // import the package/class the method depends on
/** Check that the -list-corruptfileblocks option of the fsck command works properly. */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        File[] blocks = data_dir.listFiles();
        if (blocks == null)
          continue;

        for (int idx = 0; idx < blocks.length; idx++) {
          if (!blocks[idx].getName().startsWith("blk_")) {
            continue;
          }
          assertTrue("Cannot remove file.", blocks[idx].delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs,"/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 69, Source: TestFsck.java

Example 5: testFsckListCorruptFilesBlocks

import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; // import the package/class the method depends on
/**
 * Check that the -list-corruptfileblocks option of the fsck command works properly.
 */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    String outStr =
        runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        File[] blocks = data_dir.listFiles();
        if (blocks == null) {
          continue;
        }

        for (File block : blocks) {
          if (!block.getName().startsWith("blk_")) {
            continue;
          }
          assertTrue("Cannot remove file.", block.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks =
        namenode.listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks =
          namenode.listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr =
        runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 75, Source: TestFsck.java
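All five tests drive fsck through a runFsck test helper. Outside of tests, a similar check can be run programmatically through the DFSck tool; the sketch below assumes a Configuration that points at a live cluster, and the class name FsckListCorrupt is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckListCorrupt {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent to: hdfs fsck /corruptData -list-corruptfileblocks
    Configuration conf = new Configuration();  // must point at the cluster
    int rc = ToolRunner.run(new DFSck(conf),
        new String[] {"/corruptData", "-list-corruptfileblocks"});
    System.exit(rc);
  }
}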


Note: the org.apache.hadoop.hdfs.protocol.CorruptFileBlocks.getFiles examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects, and copyright remains with their original authors; consult each project's License before redistributing or using the code, and do not republish without permission.