This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.getInstanceStorageDir. If you have been wondering what MiniDFSCluster.getInstanceStorageDir does and how to use it in practice, the curated code examples below should help. You can also explore the enclosing class, org.apache.hadoop.hdfs.MiniDFSCluster, for further context.
Three code examples of MiniDFSCluster.getInstanceStorageDir are shown below, sorted by popularity by default.
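All three examples share one pattern: map a (datanode index, storage-directory index) pair to a local directory with getInstanceStorageDir, then locate the finalized replica directory of a block pool with the static MiniDFSCluster.getFinalizedDir. Here is a minimal sketch of that pattern on its own; the class name and the bare-bones cluster setup are illustrative, not taken from the examples below.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GetInstanceStorageDirSketch {
  public static void main(String[] args) throws Exception {
    // A single-datanode cluster with default settings, for illustration only.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).build();
    try {
      cluster.waitActive();
      String bpid = cluster.getNamesystem().getBlockPoolId();
      // Storage directory j of datanode i; by default each datanode has two.
      File storageDir = cluster.getInstanceStorageDir(0, 0);
      // Finalized replicas of the block pool live below the storage directory.
      File finalizedDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      System.out.println("Finalized block dir: " + finalizedDir);
    } finally {
      cluster.shutdown();
    }
  }
}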
Example 1: testlistCorruptFileBlocksDFS
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on

/**
 * Test listCorruptFileBlocks in DistributedFileSystem.
 */
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // Make the datanode scan its directories every second.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).
        setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    RemoteIterator<Path> corruptFileBlocks =
        dfs.listCorruptFileBlocks(new Path("/corruptData"));
    int numCorrupt = countPaths(corruptFileBlocks);
    assertTrue(numCorrupt == 0);

    // Delete the block and metadata files to corrupt the data.
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // Loop through the data directories of datanode 0 (two per datanode).
    for (int i = 0; i < 2; i++) {
      File storageDir = cluster.getInstanceStorageDir(0, i);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
          data_dir);
      if (metadataFiles == null)
        continue;
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        LOG.info("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
        LOG.info("Deliberately removing file " + metadataFile.getName());
        assertTrue("Cannot remove file.", metadataFile.delete());
      }
    }

    // Wait (up to ~30 seconds) for the namenode to notice the corruption.
    int count = 0;
    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    numCorrupt = countPaths(corruptFileBlocks);
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      count++;
      if (count > 30)
        break;
    }

    // Validate that we see all the corrupt files.
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
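The example calls a countPaths helper that the excerpt does not show. A minimal sketch of what it presumably does — the name and signature are inferred from the call sites above, and the body is an assumption:

// Hypothetical helper (not part of the excerpt): drains the iterator
// returned by listCorruptFileBlocks and counts the paths it yields.
private static int countPaths(RemoteIterator<Path> iter) throws IOException {
  int count = 0;
  while (iter.hasNext()) {
    iter.next();
    count++;
  }
  return count;
}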
Example 2: testFsckListCorruptFilesBlocks
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on

/** Check that the -list-corruptfileblocks option of the fsck command works properly. */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));

    // Delete the block and metadata files to corrupt the data.
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    // Probe the possible datanode / storage-directory indices; directories
    // that do not exist yield no metadata files and are skipped below.
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // Wait for the namenode to see the corruption.
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }

    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and validate.
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));

    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
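This example in turn relies on a runFsck helper that the excerpt does not show. A plausible sketch, assuming it wraps the DFSck tool via ToolRunner, captures the output, and optionally asserts on the exit code; the signature is inferred from the call sites above, and org.apache.hadoop.hdfs.tools.DFSck plus org.apache.hadoop.util.ToolRunner are assumed to be imported:

// Hypothetical helper (not part of the excerpt): runs fsck with the given
// arguments, captures its output, and checks the exit code when requested.
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  return bStream.toString();
}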
Example 3: testDfsAdminDeleteBlockPool
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on

@Test
public void testDfsAdminDeleteBlockPool() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
        "namesServerId1,namesServerId2");
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
            conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
        .numDataNodes(1).build();
    cluster.waitActive();

    FileSystem fs1 = cluster.getFileSystem(0);
    FileSystem fs2 = cluster.getFileSystem(1);
    DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 1, 54);
    DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 1, 54);

    DataNode dn1 = cluster.getDataNodes().get(0);
    String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
    String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
    File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
    File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);

    // Drop the second nameservice from the datanode's view so that bpid2
    // becomes a stale block pool on dn1.
    Configuration nn1Conf = cluster.getConfiguration(0);
    nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
    dn1.refreshNamenodes(nn1Conf);
    assertEquals(1, dn1.getAllBpOs().length);

    DFSAdmin admin = new DFSAdmin(nn1Conf);
    String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();

    // Without "force", deleting a non-empty block pool must fail ...
    String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
    int ret = admin.run(args);
    assertFalse(0 == ret);
    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);

    // ... while with "force" the block pool directories are removed.
    String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
    ret = admin.run(forceArgs);
    assertEquals(0, ret);
    verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);

    // bpid1 remains intact.
    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
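Finally, verifyBlockPoolDirectories is also an unshown helper. A minimal sketch, assuming it only checks for the existence of the block pool's current/&lt;bpid&gt; subtree under a storage directory; the real helper may inspect the directory layout more thoroughly:

// Hypothetical helper (not part of the excerpt): asserts that the
// block-pool directory under the given storage dir exists or is gone.
private static void verifyBlockPoolDirectories(boolean shouldExist,
    File storageDir, String bpid) {
  File bpDir = new File(storageDir, "current/" + bpid);
  if (shouldExist) {
    assertTrue("Block pool dir " + bpDir + " should exist", bpDir.exists());
  } else {
    assertFalse("Block pool dir " + bpDir + " should have been deleted",
        bpDir.exists());
  }
}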