This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.formatNameNode. If you are wondering what DFSTestUtil.formatNameNode does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.
The following shows 15 code examples of the DFSTestUtil.formatNameNode method, sorted by popularity by default.
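Most of the examples share one pattern: point the NameNode's name directory at a test location, format it with DFSTestUtil.formatNameNode, and then load an FSNamesystem (or start a NameNode) against the freshly formatted storage. Below is a minimal sketch of that pattern, assuming the usual Hadoop test imports (HdfsConfiguration, MiniDFSCluster, NameNode, FSNamesystem); the method name is hypothetical and not one of the 15 examples.

import org.apache.hadoop.hdfs.DFSTestUtil;

@Test
public void formatAndLoadSketch() throws Exception {   // hypothetical method name, for illustration only
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);   // metrics must be initialized before loadFromDisk
  DFSTestUtil.formatNameNode(conf);                    // writes a fresh fsimage/edits into nameDir
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);  // loads the namespace that was just formatted
  fsn.close();
}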
Example 1: testFSNamespaceClearLeases
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test that FSNamesystem#clear clears all leases.
 */
@Test
public void testFSNamespaceClearLeases() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
  LeaseManager leaseMan = fsn.getLeaseManager();
  leaseMan.addLease("client1", "importantFile");
  assertEquals(1, leaseMan.countLease());
  fsn.clear();
  leaseMan = fsn.getLeaseManager();
  assertEquals(0, leaseMan.countLease());
}
Example 2: testThatMatchingRPCandHttpPortsThrowException
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Tests that setting the rpc port to the same value as the web port
 * throws an exception when the NameNode tries to re-use the port.
 */
@Test(expected = BindException.class, timeout = 300000)
public void testThatMatchingRPCandHttpPortsThrowException()
    throws IOException {
  NameNode nameNode = null;
  try {
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nameDir.getAbsolutePath());

    Random rand = new Random();
    final int port = 30000 + rand.nextInt(30000);

    // set both of these to the same port. It should fail.
    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port);
    DFSTestUtil.formatNameNode(conf);
    nameNode = new NameNode(conf);
  } finally {
    if (nameNode != null) {
      nameNode.stop();
    }
  }
}
Example 3: testCanLoadCreatedEditsLog
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Tests that an edits log created using CreateEditsLog is valid and can be
 * loaded successfully by a namenode.
 */
@Test(timeout=60000)
public void testCanLoadCreatedEditsLog() throws Exception {
  // Format namenode.
  HdfsConfiguration conf = new HdfsConfiguration();
  File nameDir = new File(HDFS_DIR, "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
  DFSTestUtil.formatNameNode(conf);

  // Call CreateEditsLog and move the resulting edits to the name dir.
  CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d",
      TEST_DIR.getAbsolutePath() });
  Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
  FileContext localFc = FileContext.getLocalFSFileContext();
  for (FileStatus edits: localFc.util().globStatus(editsWildcard)) {
    Path src = edits.getPath();
    Path dst = new Path(new File(nameDir, "current").getAbsolutePath(),
        src.getName());
    localFc.rename(src, dst);
  }

  // Start a namenode to try to load the edits.
  cluster = new MiniDFSCluster.Builder(conf)
      .format(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .build();
  cluster.waitClusterUp();

  // Test successful, because no exception thrown.
}
Example 4: testNNThroughput
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
 */
@Test
public void testNNThroughput() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());
  FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  DFSTestUtil.formatNameNode(conf);
  String[] args = new String[] {"-op", "all"};
  NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
}
Example 5: testSaveWhileEditsRolled
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test (timeout=30000)
public void testSaveWhileEditsRolled() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  try {
    doAnEdit(fsn, 1);
    CheckpointSignature sig = fsn.rollEditLog();
    LOG.warn("Checkpoint signature: " + sig);
    // Do another edit
    doAnEdit(fsn, 2);

    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fsn.saveNamespace();

    // Now shut down and restart the NN
    fsn.close();
    fsn = null;

    // Start a new namesystem, which should be able to recover
    // the namespace from the previous incarnation.
    fsn = FSNamesystem.loadFromDisk(conf);

    // Make sure the image loaded including our edits.
    checkEditExists(fsn, 1);
    checkEditExists(fsn, 2);
  } finally {
    if (fsn != null) {
      fsn.close();
    }
  }
}
Example 6: testTxIdPersistence
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test (timeout=30000)
public void testTxIdPersistence() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  try {
    // We have a BEGIN_LOG_SEGMENT txn to start
    assertEquals(1, fsn.getEditLog().getLastWrittenTxId());
    doAnEdit(fsn, 1);
    assertEquals(2, fsn.getEditLog().getLastWrittenTxId());

    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fsn.saveNamespace();

    // 2 more txns: END the first segment, BEGIN a new one
    assertEquals(4, fsn.getEditLog().getLastWrittenTxId());

    // Shut down and restart
    fsn.getFSImage().close();
    fsn.close();

    // 1 more txn to END that segment
    assertEquals(5, fsn.getEditLog().getLastWrittenTxId());
    fsn = null;

    fsn = FSNamesystem.loadFromDisk(conf);
    // 1 more txn to start new segment on restart
    assertEquals(6, fsn.getEditLog().getLastWrittenTxId());
  } finally {
    if (fsn != null) {
      fsn.close();
    }
  }
}
Example 7: testThatDifferentRPCandHttpPortsAreOK
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Tests that no exception is thrown when the rpc port is set to a
 * different value than the web port.
 */
@Test(timeout = 300000)
public void testThatDifferentRPCandHttpPortsAreOK()
    throws IOException {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());

  Random rand = new Random();

  // A few retries in case the ports we choose are in use.
  for (int i = 0; i < 5; ++i) {
    final int port1 = 30000 + rand.nextInt(10000);
    final int port2 = port1 + 1 + rand.nextInt(10000);

    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port2);
    DFSTestUtil.formatNameNode(conf);
    NameNode nameNode = null;

    try {
      nameNode = new NameNode(conf); // should be OK!
      break;
    } catch(BindException be) {
      continue; // Port in use? Try another.
    } finally {
      if (nameNode != null) {
        nameNode.stop();
      }
    }
  }
}
Example 8: testGenericKeysForNameNodeFormat
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * HDFS-3013: NameNode format command doesn't pick up
 * dfs.namenode.name.dir.NameServiceId configuration.
 */
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
    throws IOException {
  Configuration conf = new HdfsConfiguration();

  // Set ephemeral ports
  conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "127.0.0.1:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
      "127.0.0.1:0");

  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");

  // Set a nameservice-specific configuration for name dir
  File dir = new File(MiniDFSCluster.getBaseDirectory(),
      "testGenericKeysForNameNodeFormat");
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
      dir.getAbsolutePath());

  // Format and verify the right dir is formatted.
  DFSTestUtil.formatNameNode(conf);
  GenericTestUtils.assertExists(dir);

  // Ensure that the same dir is picked up by the running NN
  NameNode nameNode = new NameNode(conf);
  nameNode.stop();
}
Example 9: setUp
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
  CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  // Set properties to make HDFS aware of NodeGroup.
  CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
      BlockPlacementPolicyWithNodeGroup.class.getName());
  CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
      NetworkTopologyWithNodeGroup.class.getName());

  CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);

  File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);

  CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());

  DFSTestUtil.formatNameNode(CONF);
  namenode = new NameNode(CONF);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
  }
  setupDataNodeCapacity();
}
Example 10: doTestFailedSaveNamespace
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Injects a failure on all storage directories while saving namespace.
 *
 * @param restoreStorageAfterFailure if true, will try to save again after
 *   clearing the failure injection
 */
public void doTestFailedSaveNamespace(boolean restoreStorageAfterFailure)
    throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  // Replace the FSImage with a spy
  final FSImage originalImage = fsn.getFSImage();
  NNStorage storage = originalImage.getStorage();
  storage.close(); // unlock any directories that FSNamesystem's initialization may have locked

  NNStorage spyStorage = spy(storage);
  originalImage.storage = spyStorage;
  FSImage spyImage = spy(originalImage);
  Whitebox.setInternalState(fsn, "fsImage", spyImage);

  spyImage.storage.setStorageDirectories(
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));

  doThrow(new IOException("Injected fault: saveFSImage")).
      when(spyImage).saveFSImage(
          (SaveNamespaceContext)anyObject(),
          (StorageDirectory)anyObject(), (NameNodeFile) anyObject());

  try {
    doAnEdit(fsn, 1);

    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      fsn.saveNamespace();
      fail("saveNamespace did not fail even when all directories failed!");
    } catch (IOException ioe) {
      LOG.info("Got expected exception", ioe);
    }

    // Ensure that, if storage dirs come back online, things work again.
    if (restoreStorageAfterFailure) {
      Mockito.reset(spyImage);
      spyStorage.setRestoreFailedStorage(true);
      fsn.saveNamespace();
      checkEditExists(fsn, 1);
    }

    // Now shut down and restart the NN
    originalImage.close();
    fsn.close();
    fsn = null;

    // Start a new namesystem, which should be able to recover
    // the namespace from the previous incarnation.
    fsn = FSNamesystem.loadFromDisk(conf);

    // Make sure the image loaded including our edits.
    checkEditExists(fsn, 1);
  } finally {
    if (fsn != null) {
      fsn.close();
    }
  }
}
Example 11: testCancelSaveNamespace
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test(timeout=20000)
public void testCancelSaveNamespace() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  // Replace the FSImage with a spy
  final FSImage image = fsn.getFSImage();
  NNStorage storage = image.getStorage();
  storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
  storage.setStorageDirectories(
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));

  FSNamesystem spyFsn = spy(fsn);
  final FSNamesystem finalFsn = spyFsn;
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
  BlockIdManager bid = spy(spyFsn.getBlockIdManager());
  Whitebox.setInternalState(finalFsn, "blockIdManager", bid);
  doAnswer(delayer).when(bid).getGenerationStampV2();

  ExecutorService pool = Executors.newFixedThreadPool(2);

  try {
    doAnEdit(fsn, 1);
    final Canceler canceler = new Canceler();

    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      Future<Void> saverFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          image.saveNamespace(finalFsn, NameNodeFile.IMAGE, canceler);
          return null;
        }
      });

      // Wait until saveNamespace calls getGenerationStamp
      delayer.waitForCall();
      // then cancel the saveNamespace
      Future<Void> cancelFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          canceler.cancel("cancelled");
          return null;
        }
      });
      // give the cancel call time to run
      Thread.sleep(500);

      // allow saveNamespace to proceed - it should check the cancel flag after
      // this point and throw an exception
      delayer.proceed();

      cancelFuture.get();
      saverFuture.get();
      fail("saveNamespace did not fail even though cancelled!");
    } catch (Throwable t) {
      GenericTestUtils.assertExceptionContains(
          "SaveNamespaceCancelledException", t);
    }

    LOG.info("Successfully cancelled a saveNamespace");

    // Check that we have only the original image and not any
    // cruft left over from half-finished images
    FSImageTestUtil.logStorageContents(LOG, storage);
    for (StorageDirectory sd : storage.dirIterable(null)) {
      File curDir = sd.getCurrentDir();
      GenericTestUtils.assertGlobEquals(curDir, "fsimage_.*",
          NNStorage.getImageFileName(0),
          NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);
    }
  } finally {
    fsn.close();
  }
}
Example 12: testCompression
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testCompression() throws IOException {
  LOG.info("Test compressing image.");
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
  File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(base_dir, "name").getPath());
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);

  DFSTestUtil.formatNameNode(conf);

  // create an uncompressed image
  LOG.info("Create an uncompressed fsimage");
  NameNode namenode = new NameNode(conf);
  namenode.getNamesystem().mkdirs("/test",
      new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  assertTrue(nnRpc.getFileInfo("/test").isDir());
  nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  nnRpc.saveNamespace();
  namenode.stop();
  namenode.join();

  // compress image using default codec
  LOG.info("Read an uncompressed image and store it compressed using default codec.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  checkNameSpace(conf);

  // read image compressed using the default and compress it using Gzip codec
  LOG.info("Read a compressed image and store it using a different codec.");
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      "org.apache.hadoop.io.compress.GzipCodec");
  checkNameSpace(conf);

  // read an image compressed in Gzip and store it uncompressed
  LOG.info("Read a compressed image and store it as uncompressed.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
  checkNameSpace(conf);

  // read an uncompressed image and store it uncompressed
  LOG.info("Read an uncompressed image and store it as uncompressed.");
  checkNameSpace(conf);
}
Example 13: setupCluster
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@BeforeClass
public static void setupCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/rack1",
      "/rack1",
      "/rack1",
      "/rack2",
      "/rack2",
      "/rack2"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);
  int blockSize = 1024;

  dnrList = new ArrayList<DatanodeRegistration>();
  dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();

  // Register DNs
  for (int i=0; i < 6; i++) {
    DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
        new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
        VersionInfo.getVersion());
    dnrList.add(dnr);
    dnManager.registerDatanode(dnr);
    dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L);
    dataNodes[i].updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
        0L, 0L, 0, 0, null);
  }
}
Example 14: setupCluster
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@BeforeClass
public static void setupCluster() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/d1/r1",
      "/d1/r1",
      "/d1/r2",
      "/d1/r2",
      "/d2/r3",
      "/d2/r3"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);

  // create an extra storage for dn5.
  DatanodeStorage extraStorage = new DatanodeStorage(
      storages[5].getStorageID() + "-extra", DatanodeStorage.State.NORMAL,
      StorageType.DEFAULT);
  /*  DatanodeStorageInfo si = new DatanodeStorageInfo(
      storages[5].getDatanodeDescriptor(), extraStorage);
   */
  BlockManagerTestUtil.updateStorage(storages[5].getDatanodeDescriptor(),
      extraStorage);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);

  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (int i=0; i < NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
    bm.getDatanodeManager().getHeartbeatManager().addDatanode(
        dataNodes[i]);
  }
  resetHeartbeatForStorages();
}
Example 15: testDNWithInvalidStorageWithHA
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testDNWithInvalidStorageWithHA() throws Exception {
  MiniDFSNNTopology top = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1"))
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));

  top.setFederation(true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top)
      .numDataNodes(0).build();
  try {
    cluster.startDataNodes(conf, 1, true, null, null);
    // let the initialization be complete
    Thread.sleep(10000);
    DataNode dn = cluster.getDataNodes().get(0);
    assertTrue("Datanode should be running", dn.isDatanodeUp());
    assertEquals("BPOfferService should be running", 1,
        dn.getAllBpOs().length);
    DataNodeProperties dnProp = cluster.stopDataNode(0);

    cluster.getNameNode(0).stop();
    cluster.getNameNode(1).stop();
    Configuration nn1 = cluster.getConfiguration(0);
    Configuration nn2 = cluster.getConfiguration(1);

    // setting up invalid cluster
    StartupOption.FORMAT.setClusterId("cluster-2");
    DFSTestUtil.formatNameNode(nn1);
    MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),
        FSNamesystem.getNamespaceDirs(nn2), nn2);
    cluster.restartNameNode(0, false);
    cluster.restartNameNode(1, false);
    cluster.restartDataNode(dnProp);

    // let the initialization be complete
    Thread.sleep(10000);
    dn = cluster.getDataNodes().get(0);
    assertFalse("Datanode should have shutdown as only service failed",
        dn.isDatanodeUp());
  } finally {
    cluster.shutdown();
  }
}