This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.CorruptFileBlockIterator. If you are wondering what CorruptFileBlockIterator is used for or how to use it, the curated code examples below may help.
The CorruptFileBlockIterator class belongs to the org.apache.hadoop.hdfs package. Two code examples are shown below, ordered by popularity by default.
Example 1: listCorruptFileBlocks
import org.apache.hadoop.hdfs.CorruptFileBlockIterator; // import the required package/class

@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
    throws IOException {
  return new CorruptFileBlockIterator(dfs, path);
}
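For context, the RemoteIterator<Path> returned above is normally consumed in a simple loop. The sketch below is not part of the original example; it assumes fs is a FileSystem whose listCorruptFileBlocks implementation delegates to CorruptFileBlockIterator (as in Example 1), and printCorruptFiles is a hypothetical helper name chosen for illustration.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

// Hypothetical helper: prints every file under `dir` that the NameNode
// currently reports as having at least one corrupt block.
public static void printCorruptFiles(FileSystem fs, Path dir) throws IOException {
  RemoteIterator<Path> it = fs.listCorruptFileBlocks(dir);
  while (it.hasNext()) {
    // next() may fetch further batches of results from the NameNode
    System.out.println("corrupt: " + it.next());
  }
}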
Example 2: testMaxCorruptFiles
import org.apache.hadoop.hdfs.CorruptFileBlockIterator; // import the required package/class
/**
 * Test if NN.listCorruptFiles() returns the right number of results.
 * Also, test that DFS.listCorruptFileBlocks can make multiple successive
 * calls.
 */
@Test
public void testMaxCorruptFiles() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt("dfs.datanode.directoryscan.interval", 15); // datanode scans directories
    conf.setInt("dfs.blockreport.intervalMsec", 3 * 1000); // datanode sends block reports
    final int maxCorruptFileBlocks = 20;
    conf.setInt("dfs.corruptfilesreturned.max", maxCorruptFileBlocks);
    cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();

    // create maxCorruptFileBlocks * 3 files with one block each
    DFSTestUtil util = new DFSTestUtil("testMaxCorruptFiles",
        maxCorruptFileBlocks * 3, 1, 512);
    util.createFiles(fs, "/srcdat2", (short) 1);
    util.waitReplication(fs, "/srcdat2", (short) 1);

    // verify that there are no bad blocks.
    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles =
        namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.",
        badFiles.size() == 0);

    // Now deliberately remove blocks from all files
    for (int i = 0; i < 8; i++) {
      File data_dir = cluster.getBlockDirectory("data" + (i + 1));
      File[] blocks = data_dir.listFiles();
      if (blocks == null)
        continue;
      for (int idx = 0; idx < blocks.length; idx++) {
        if (!blocks[idx].getName().startsWith("blk_")) {
          continue;
        }
        assertTrue("Cannot remove file.", blocks[idx].delete());
      }
    }

    // wait until the NameNode reports the configured maximum number of corrupt files
    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    while (badFiles.size() < maxCorruptFileBlocks) {
      LOG.info("# of corrupt files is: " + badFiles.size());
      Thread.sleep(10000);
      badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    }
    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting " +
        maxCorruptFileBlocks + ".",
        badFiles.size() == maxCorruptFileBlocks);

    // the client-side iterator should page through all corrupt files,
    // making more than one call to the NameNode
    CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
        fs.listCorruptFileBlocks(new Path("/srcdat2"));
    int corruptPaths = countPaths(iter);
    assertTrue("Expected more than " + maxCorruptFileBlocks +
        " corrupt file blocks but got " + corruptPaths,
        corruptPaths > maxCorruptFileBlocks);
    assertTrue("Iterator should have made more than 1 call but made " +
        iter.getCallsMade(),
        iter.getCallsMade() > 1);

    util.cleanup(fs, "/srcdat2");
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
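The test above calls a countPaths helper that is not included in this excerpt. A minimal sketch of what such a helper could look like is shown below; it simply drains the iterator and counts the returned paths (the LOG field and the RemoteIterator, Path, and IOException imports are assumed to come from the enclosing test class).

// Hypothetical helper, not shown in the excerpt above: drains the iterator
// and returns the number of corrupt-file paths it produced.
private int countPaths(RemoteIterator<Path> iter) throws IOException {
  int count = 0;
  while (iter.hasNext()) {
    LOG.info("PATH: " + iter.next().toUri().getPath());
    count++;
  }
  return count;
}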