This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.NameNode.doRollback. If you are unsure what NameNode.doRollback does, or how and where to call it, the curated code examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.NameNode.
Three code examples of the NameNode.doRollback method are shown below, ordered by popularity.
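Before the full test examples, here is a minimal sketch of the call pattern they all share: point the configuration at the NameNode storage directories, then invoke NameNode.doRollback. The storage path below is a hypothetical placeholder, and the meaning of the boolean argument (passed as false in every example) is an assumption, not something stated in the snippets.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class RollbackSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical local storage directory; in the tests below this key is
    // set to the name dirs of the NameNode whose upgrade should be rolled back.
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "/tmp/hadoop/dfs/name");
    // The second argument is false in all examples; assumed to skip any
    // interactive confirmation before the rollback is performed.
    NameNode.doRollback(conf, false);
  }
}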
Example 1: startNameNodeShouldFail
import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that declares the method
/**
 * Attempts to roll back and then start a NameNode. Starting the
 * NameNode should throw an exception whose message contains the
 * given search string.
 */
void startNameNodeShouldFail(String searchString) {
  try {
    NameNode.doRollback(conf, false);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(false)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .build(); // should fail
    throw new AssertionError("NameNode should have failed to start");
  } catch (Exception expected) {
    if (!expected.getMessage().contains(searchString)) {
      fail("Expected substring '" + searchString + "' in exception " +
          "but got: " + StringUtils.stringifyException(expected));
    }
    // expected
  }
}
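For context, a test in the surrounding class would call this helper after deliberately breaking the storage layout, passing the error text the failed startup is expected to report. The call below is a hypothetical illustration; the search string is a placeholder, not a real Hadoop error message.

// Hypothetical call site: the string is a placeholder for whatever error
// message the broken rollback is expected to produce.
startNameNodeShouldFail("expected failure message");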
Example 2: testRollbackWithNfs
import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that declares the method
/**
 * Test rollback with NFS shared dir.
 */
@Test
public void testRollbackWithNfs() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    File sharedDir = new File(cluster.getSharedEditsDir(0, 1));

    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir, false);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkPreviousDirExistence(sharedDir, true);

    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);

    cluster.restartNameNode(1);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkPreviousDirExistence(sharedDir, true);
    assertCTimesEqual(cluster);

    // Now shut down the cluster and do the rollback.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();
    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkPreviousDirExistence(sharedDir, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
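The checkPreviousDirExistence-style helpers are not included in the snippets on this page. Conceptually they just assert whether a "previous" directory exists under the given storage root after an upgrade or rollback. A minimal sketch of such a check is shown below; it is an assumption about what the helper does, not the actual Hadoop test utility, and it relies on java.io.File and JUnit's assertTrue/assertFalse as used in the tests above.

// Minimal sketch: assert presence or absence of the "previous" directory
// that an HDFS upgrade leaves under a storage root and a rollback removes.
static void checkPreviousDirExistence(File rootDir, boolean shouldExist) {
  File previousDir = new File(rootDir, "previous");
  if (shouldExist) {
    assertTrue(previousDir + " does not exist", previousDir.exists());
  } else {
    assertFalse(previousDir + " does exist", previousDir.exists());
  }
}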
Example 3: testRollbackWithJournalNodes
import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that declares the method
@Test
public void testRollbackWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();

    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster, false);

    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);

    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);

    cluster.restartNameNode(1);

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkJnPreviousDirExistence(qjCluster, true);
    assertCTimesEqual(cluster);

    // Shut down the NNs, but deliberately leave the JNs up and running.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();
    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidBeforeUpgrade < cidAfterRollback);
    // Make sure the committedTxnId has been reset correctly after rollback.
    assertTrue(cidDuringUpgrade > cidAfterRollback);

    // The rollback operation should have rolled back the first NN's local
    // dirs and the JournalNodes' dirs, but not the other NN's dirs. Those
    // have to be done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkJnPreviousDirExistence(qjCluster, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}