本文整理汇总了Java中org.apache.hadoop.hdfs.MiniDFSCluster.triggerBlockReports方法的典型用法代码示例。如果您正苦于以下问题:Java MiniDFSCluster.triggerBlockReports方法的具体用法?Java MiniDFSCluster.triggerBlockReports怎么用?Java MiniDFSCluster.triggerBlockReports使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.MiniDFSCluster的用法示例。
在下文中一共展示了MiniDFSCluster.triggerBlockReports方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testStandbyIsHot
import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Verifies that the standby NameNode stays "hot": after writing a file on the
 * active NN and forcing an edit-log roll, the standby should report the same
 * block locations, and replication changes (3 -> 1 -> 3) made on the active
 * should become visible on the standby after subsequent log rolls.
 *
 * Fix vs. original: the "=====" progress markers were printed with
 * System.err.println here but logged via LOG.info in the sibling test
 * (testInvalidateBlock); unified on LOG.info for consistency and so the
 * markers go through the test's logger instead of raw stderr.
 */
@Test(timeout=60000)
public void testStandbyIsHot() throws Exception {
  Configuration conf = new Configuration();
  // We read from the standby to watch block locations
  HAUtil.setAllowStandbyReads(conf, true);
  // Tail edits frequently (1s) so the standby catches up quickly after rolls.
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);

    NameNode nn1 = cluster.getNameNode(0); // active
    NameNode nn2 = cluster.getNameNode(1); // standby

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    // Brief pause to let the HA client/cluster settle before writing.
    Thread.sleep(1000);

    LOG.info("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    LOG.info("==================================");

    // Block locations should show up on standby.
    LOG.info("Waiting for block locations to appear on standby node");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);

    // Trigger immediate heartbeats and block reports so
    // that the active "trusts" all of the DNs
    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // Change replication
    LOG.info("Changing replication to 1");
    fs.setReplication(TEST_FILE_PATH, (short)1);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    waitForBlockLocations(cluster, nn1, TEST_FILE, 1);
    nn1.getRpcServer().rollEditLog();

    LOG.info("Waiting for lowered replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 1);

    // Change back to 3
    LOG.info("Changing replication to 3");
    fs.setReplication(TEST_FILE_PATH, (short)3);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    nn1.getRpcServer().rollEditLog();

    LOG.info("Waiting for higher replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);
  } finally {
    // Always tear the mini cluster down, even on assertion failure/timeout.
    cluster.shutdown();
  }
}
示例2: testInvalidateBlock
import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Checks that deleting a file on the active NameNode does not cause the
 * standby NameNode to queue block invalidations: the standby's
 * pending-deletion count must stay at zero both before and after forcing
 * heartbeats and block reports from the DataNodes.
 */
@Test(timeout=60000)
public void testInvalidateBlock() throws Exception {
  Configuration conf = new Configuration();
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf)
          .nnTopology(MiniDFSNNTopology.simpleHATopology())
          .numDataNodes(3)
          .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);

    final NameNode activeNN = cluster.getNameNode(0);
    final NameNode standbyNN = cluster.getNameNode(1);
    final FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

    // Give the freshly started HA pair a moment to settle.
    Thread.sleep(1000);

    LOG.info("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    activeNN.getRpcServer().rollEditLog();
    LOG.info("==================================");

    // delete the file
    fs.delete(TEST_FILE_PATH, false);
    BlockManagerTestUtil.computeAllPendingWork(
        activeNN.getNamesystem().getBlockManager());
    activeNN.getRpcServer().rollEditLog();

    // standby nn doesn't need to invalidate blocks.
    assertEquals(0,
        standbyNN.getNamesystem().getBlockManager()
            .getPendingDeletionBlocksCount());

    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // standby nn doesn't need to invalidate blocks.
    assertEquals(0,
        standbyNN.getNamesystem().getBlockManager()
            .getPendingDeletionBlocksCount());
  } finally {
    cluster.shutdown();
  }
}