This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil.waitForStandbyToCatchUp. If you have been wondering what exactly HATestUtil.waitForStandbyToCatchUp does, how to use it, or where to find examples of it, the curated code samples below may help. You can also read more about the enclosing class, org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil.
The following presents 6 code examples of HATestUtil.waitForStandbyToCatchUp, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
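Before the full examples, here is a minimal sketch of the typical call pattern. It is not taken from the examples below; it assumes Hadoop's HDFS test utilities (MiniDFSCluster, MiniDFSNNTopology, HATestUtil) are on the classpath, and the class name, test method, and "/demo" path are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.junit.Test;

public class WaitForStandbyToCatchUpSketch { // hypothetical test class

  @Test
  public void demo() throws Exception {
    Configuration conf = new Configuration();
    // Tail edits frequently so the standby catches up quickly,
    // as several of the examples below also do.
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    // Two NameNodes in one nameservice; no DataNodes are needed
    // for metadata-only operations such as mkdirs.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();
    try {
      cluster.waitActive();
      cluster.transitionToActive(0); // nn0 active, nn1 standby
      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
      fs.mkdirs(new Path("/demo")); // generate an edit on the active NN
      // Rolls the active NN's edit log and blocks until the standby
      // has caught up to that transaction.
      HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
          cluster.getNameNode(1));
    } finally {
      cluster.shutdown();
    }
  }
}

After this call returns, state read from the standby (as in the failover assertions of Example 1) should reflect every edit made on the active NameNode before the call.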
Example 1: testEncryptionZonesTrackedOnStandby
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class the method depends on
/**
 * Test that encryption zones are properly tracked by the standby.
 */
@Test(timeout = 60000)
public void testEncryptionZonesTrackedOnStandby() throws Exception {
  final int len = 8196;
  final Path dir = new Path("/enc");
  final Path dirChild = new Path(dir, "child");
  final Path dirFile = new Path(dir, "file");
  fs.mkdir(dir, FsPermission.getDirDefault());
  dfsAdmin0.createEncryptionZone(dir, TEST_KEY);
  fs.mkdir(dirChild, FsPermission.getDirDefault());
  DFSTestUtil.createFile(fs, dirFile, len, (short) 1, 0xFEED);
  String contents = DFSTestUtil.readFile(fs, dirFile);
  // Failover the current standby to active.
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dir).getPath().toString());
  Assert.assertEquals("Got unexpected ez path", dir.toString(),
      dfsAdmin1.getEncryptionZoneForPath(dirChild).getPath().toString());
  Assert.assertEquals("File contents after failover were changed",
      contents, DFSTestUtil.readFile(fs, dirFile));
}
Example 2: assertCanStartHANameNodes
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class the method depends on
private void assertCanStartHANameNodes(MiniDFSCluster cluster,
    Configuration conf, String path) throws ServiceFailedException,
    IOException, URISyntaxException, InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);
  // Make sure HA is working.
  cluster
      .getNameNode(0)
      .getRpcServer()
      .transitionToActive(
          new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(path);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
Example 3: testBootstrapStandbyWithActiveNN
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class the method depends on
/**
 * While bootstrapping, in-progress transaction entries should be skipped.
 * Bootstrap usage for BKJM: "-force", "-nonInteractive", "-skipSharedEditsCheck"
 */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // make nn0 active
  cluster.transitionToActive(0);
  // do ops and generate in-progress edit log data
  Configuration confNN1 = cluster.getConfiguration(1);
  DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
      .configureFailoverFs(cluster, confNN1);
  for (int i = 1; i <= 10; i++) {
    dfs.mkdirs(new Path("/test" + i));
  }
  dfs.close();
  // shutdown nn1 and delete its edit log files
  cluster.shutdownNameNode(1);
  deleteEditLogIfExists(confNN1);
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
  cluster.getNameNodeRpc(0).saveNamespace();
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);
  // Without -skipSharedEditsCheck, bootstrap should fail for BKJM
  // immediately after saveNamespace.
  int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
      confNN1);
  Assert.assertEquals("Mismatches return code", 6, rc);
  // check with -skipSharedEditsCheck
  rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
      "-skipSharedEditsCheck" }, confNN1);
  Assert.assertEquals("Mismatches return code", 0, rc);
  // Checkpoint as fast as we can, in a tight loop.
  confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
  cluster.restartNameNode(1);
  cluster.transitionToStandby(1);
  NameNode nn0 = cluster.getNameNode(0);
  HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
      .getFSImage().getMostRecentCheckpointTxId();
  HATestUtil.waitForCheckpoint(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));
  // Should have copied over the namespace
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
Example 4: testNNDirectorySize
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class the method depends on
@Test(timeout = 120000)
public void testNNDirectorySize() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  // Have to specify IPC ports so the NNs can talk to each other.
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn1")
              .setIpcPort(ServerSocketUtil.getPort(0, 100)))
          .addNN(new MiniDFSNNTopology.NNConf("nn2")
              .setIpcPort(ServerSocketUtil.getPort(0, 100))));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology).numDataNodes(0)
      .build();
  FileSystem fs = null;
  try {
    cluster.waitActive();
    FSNamesystem nn0 = cluster.getNamesystem(0);
    FSNamesystem nn1 = cluster.getNamesystem(1);
    checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
    checkNNDirSize(cluster.getNameDirs(1), nn1.getNameDirSize());
    cluster.transitionToActive(0);
    fs = cluster.getFileSystem(0);
    DFSTestUtil.createFile(fs, new Path("/file"), 0, (short) 1, 0L);
    // Roll the edit log so the standby picks up the new transactions.
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
    checkNNDirSize(cluster.getNameDirs(1), nn1.getNameDirSize());
    // Test the metric after calling saveNamespace.
    DFSTestUtil.createFile(fs, new Path("/file"), 0, (short) 1, 0L);
    nn0.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    nn0.saveNamespace(0, 0);
    checkNNDirSize(cluster.getNameDirs(0), nn0.getNameDirSize());
  } finally {
    cluster.shutdown();
  }
}
Example 5: testTransactionSinceLastCheckpointMetrics
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class the method depends on
/**
 * Testing TransactionsSinceLastCheckpoint. Need a new cluster as
 * the other tests in here don't use HA. See HDFS-7501.
 */
@Test(timeout = 300000)
public void testTransactionSinceLastCheckpointMetrics() throws Exception {
  Random random = new Random();
  int retryCount = 0;
  while (retryCount < 5) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
      HdfsConfiguration conf2 = new HdfsConfiguration();
      // Lower the checkpoint condition for purpose of testing.
      conf2.setInt(
          DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
          100);
      // Check for checkpoint condition very often, for purpose of testing.
      conf2.setInt(
          DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
          1);
      // Poll and follow ANN txns very often, for purpose of testing.
      conf2.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
      MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2)
          .nnTopology(topology).numDataNodes(1).build();
      cluster2.waitActive();
      DistributedFileSystem fs2 = cluster2.getFileSystem(0);
      NameNode nn0 = cluster2.getNameNode(0);
      NameNode nn1 = cluster2.getNameNode(1);
      cluster2.transitionToActive(0);
      fs2.mkdirs(new Path("/tmp-t1"));
      fs2.mkdirs(new Path("/tmp-t2"));
      HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
      // Test to ensure tracking works before the first-ever
      // checkpoint.
      assertEquals("SBN failed to track 2 transactions pre-checkpoint.",
          4L, // 2 txns added further when catch-up is called.
          cluster2.getNameNode(1).getNamesystem()
              .getTransactionsSinceLastCheckpoint());
      // Complete up to the boundary required for
      // an auto-checkpoint. Using 94 to expect fsimage
      // rounded at 100, as 4 + 94 + 2 (catch-up call) = 100.
      for (int i = 1; i <= 94; i++) {
        fs2.mkdirs(new Path("/tmp-" + i));
      }
      HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
      // Assert 100 transactions in checkpoint.
      HATestUtil.waitForCheckpoint(cluster2, 1, ImmutableList.of(100));
      // Test to ensure number tracks the right state of
      // uncheckpointed edits, and does not go negative
      // (as fixed in HDFS-7501).
      assertEquals("Should be zero right after the checkpoint.",
          0L,
          cluster2.getNameNode(1).getNamesystem()
              .getTransactionsSinceLastCheckpoint());
      fs2.mkdirs(new Path("/tmp-t3"));
      fs2.mkdirs(new Path("/tmp-t4"));
      HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
      // Test to ensure we track the right numbers after
      // the checkpoint resets it to zero again.
      assertEquals("SBN failed to track 2 added txns after the ckpt.",
          4L,
          cluster2.getNameNode(1).getNamesystem()
              .getTransactionsSinceLastCheckpoint());
      cluster2.shutdown();
      break;
    } catch (Exception e) {
      LOG.warn("Unable to set up HA cluster, exception thrown: " + e);
      retryCount++;
    }
  }
}
Example 6: testBootstrapStandbyWithActiveNN
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class the method depends on
/**
 * While bootstrapping, in-progress transaction entries should be skipped.
 * Bootstrap usage for BKJM: "-force", "-nonInteractive", "-skipSharedEditsCheck"
 */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // make nn0 active
  cluster.transitionToActive(0);
  // do ops and generate in-progress edit log data
  Configuration confNN1 = cluster.getConfiguration(1);
  DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
      .configureFailoverFs(cluster, confNN1);
  for (int i = 1; i <= 10; i++) {
    dfs.mkdirs(new Path("/test" + i));
  }
  dfs.close();
  // shutdown nn1 and delete its edit log files
  cluster.shutdownNameNode(1);
  deleteEditLogIfExists(confNN1);
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
  cluster.getNameNodeRpc(0).saveNamespace(0, 0);
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);
  // Without -skipSharedEditsCheck, bootstrap should fail for BKJM
  // immediately after saveNamespace.
  int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
      confNN1);
  Assert.assertEquals("Mismatches return code", 6, rc);
  // check with -skipSharedEditsCheck
  rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
      "-skipSharedEditsCheck" }, confNN1);
  Assert.assertEquals("Mismatches return code", 0, rc);
  // Checkpoint as fast as we can, in a tight loop.
  confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
  cluster.restartNameNode(1);
  cluster.transitionToStandby(1);
  NameNode nn0 = cluster.getNameNode(0);
  HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
      .getFSImage().getMostRecentCheckpointTxId();
  HATestUtil.waitForCheckpoint(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));
  // Should have copied over the namespace
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}