This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.triggerHeartbeats. If you are wondering what MiniDFSCluster.triggerHeartbeats does, how to call it, or want to see it in real code, the curated examples below should help. You can also explore further usage of the enclosing class, org.apache.hadoop.hdfs.MiniDFSCluster.
Below are 3 code examples of MiniDFSCluster.triggerHeartbeats, sorted by popularity by default.
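Before the examples, a brief orientation: triggerHeartbeats() makes every DataNode in the MiniDFSCluster send a heartbeat to its NameNode(s) immediately, instead of waiting for the configured heartbeat interval, so tests can observe up-to-date space accounting and command dispatch without sleeping. Here is a minimal sketch of the call in isolation; the class name TriggerHeartbeatsSketch and the /probe path are invented for illustration and do not come from the examples below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class TriggerHeartbeatsSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, new Path("/probe"), 1024L, (short) 1, 0L);
      // Ask every DataNode to heartbeat right now, so the NameNode's
      // space accounting reflects the file we just wrote.
      cluster.triggerHeartbeats();
      System.out.println("Used space: " + fs.getStatus().getUsed());
    } finally {
      cluster.shutdown();
    }
  }
}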
Example 1: testStandbyIsHot
import org.apache.hadoop.hdfs.MiniDFSCluster; // the package/class the method depends on
@Test(timeout=60000)
public void testStandbyIsHot() throws Exception {
  Configuration conf = new Configuration();
  // We read from the standby to watch block locations
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);

    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    Thread.sleep(1000);
    System.err.println("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    System.err.println("==================================");

    // Block locations should show up on standby.
    LOG.info("Waiting for block locations to appear on standby node");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);

    // Trigger immediate heartbeats and block reports so
    // that the active "trusts" all of the DNs
    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // Change replication
    LOG.info("Changing replication to 1");
    fs.setReplication(TEST_FILE_PATH, (short)1);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    waitForBlockLocations(cluster, nn1, TEST_FILE, 1);

    nn1.getRpcServer().rollEditLog();
    LOG.info("Waiting for lowered replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 1);

    // Change back to 3
    LOG.info("Changing replication to 3");
    fs.setReplication(TEST_FILE_PATH, (short)3);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    nn1.getRpcServer().rollEditLog();
    LOG.info("Waiting for higher replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);
  } finally {
    cluster.shutdown();
  }
}
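Example 1 calls a waitForBlockLocations helper that is defined elsewhere in the test class and is not part of this excerpt. A plausible reconstruction, assuming it simply polls the given NameNode until the file's last block reports the expected number of replica locations; the poll interval and timeout are assumptions, and the cluster parameter is kept only to match the call sites above:

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

static void waitForBlockLocations(MiniDFSCluster cluster, NameNode nn,
    String path, int expectedReplicas) throws Exception {
  long deadline = System.currentTimeMillis() + 30000; // assumed timeout
  while (true) {
    // Ask the NameNode directly for the file's block locations.
    LocatedBlocks locs =
        nn.getRpcServer().getBlockLocations(path, 0, Long.MAX_VALUE);
    int found = locs.getLastLocatedBlock() == null
        ? 0 : locs.getLastLocatedBlock().getLocations().length;
    if (found == expectedReplicas) {
      return;
    }
    if (System.currentTimeMillis() > deadline) {
      throw new AssertionError("Expected " + expectedReplicas
          + " locations for " + path + " but found " + found);
    }
    Thread.sleep(500); // assumed poll interval
  }
}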
Example 2: testInvalidateBlock
import org.apache.hadoop.hdfs.MiniDFSCluster; // the package/class the method depends on
@Test(timeout=60000)
public void testInvalidateBlock() throws Exception {
  Configuration conf = new Configuration();
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);

    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    Thread.sleep(1000);
    LOG.info("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    LOG.info("==================================");

    // Delete the file
    fs.delete(TEST_FILE_PATH, false);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    nn1.getRpcServer().rollEditLog();

    // The standby NN doesn't need to invalidate blocks.
    assertEquals(0,
        nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // The standby NN still doesn't need to invalidate blocks.
    assertEquals(0,
        nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
  } finally {
    cluster.shutdown();
  }
}
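Both HA examples pair triggerHeartbeats() with triggerBlockReports() to push the state of every DataNode to the NameNodes at once. When a test only needs a single DataNode to report in, Hadoop's test utilities offer a per-node variant. A short fragment, meant to be read with a cluster variable as in the examples above rather than as a standalone program:

import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

// Per-node equivalent of cluster.triggerHeartbeats(): heartbeat only
// the first DataNode of the mini cluster.
DataNode dn = cluster.getDataNodes().get(0);
DataNodeTestUtils.triggerHeartbeat(dn);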
Example 3: testTwoReplicaShouldNotInSameDN
import org.apache.hadoop.hdfs.MiniDFSCluster; // the package/class the method depends on
/**
 * Test a special case: two replicas belonging to the same block should not
 * end up on the same node. We have 2 nodes.
 * We have a block in (DN0,SSD) and (DN1,DISK).
 * The replica in (DN0,SSD) should not be moved to (DN1,SSD);
 * otherwise DN1 would hold 2 replicas of the block.
 */
@Test(timeout=100000)
public void testTwoReplicaShouldNotInSameDN() throws Exception {
  final Configuration conf = new HdfsConfiguration();

  int blockSize = 5 * 1024 * 1024;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);

  int numOfDatanodes = 2;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numOfDatanodes)
      .racks(new String[]{"/default/rack0", "/default/rack0"})
      .storagesPerDatanode(2)
      .storageTypes(new StorageType[][]{
          {StorageType.SSD, StorageType.DISK},
          {StorageType.SSD, StorageType.DISK}})
      .storageCapacities(new long[][]{
          {100 * blockSize, 20 * blockSize},
          {20 * blockSize, 100 * blockSize}})
      .build();

  try {
    cluster.waitActive();

    // Set the "/bar" directory to the ONE_SSD storage policy.
    DistributedFileSystem fs = cluster.getFileSystem();
    Path barDir = new Path("/bar");
    fs.mkdir(barDir, new FsPermission((short) 777));
    fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

    // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full,
    // and (DN0,DISK) and (DN1,SSD) are about 15% full.
    long fileLen = 30 * blockSize;
    // fooFile has the ONE_SSD policy. So
    // (DN0,SSD) and (DN1,DISK) hold 2 replicas belonging to the same block,
    // and (DN0,DISK) and (DN1,SSD) hold 2 replicas belonging to the same block.
    Path fooFile = new Path(barDir, "foo");
    createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0);
    // Update the NameNode's space info.
    cluster.triggerHeartbeats();

    Balancer.Parameters p = Balancer.Parameters.DEFAULT;
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    final int r = Balancer.run(namenodes, p, conf);

    // The replica in (DN0,SSD) was not moved to (DN1,SSD), because (DN1,DISK)
    // already has a replica of that block; otherwise DN1 would have 2 replicas.
    // For the same reason, no replicas were moved at all.
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
  } finally {
    cluster.shutdown();
  }
}
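Example 3 uses a createFile helper that is also not part of this excerpt. A plausible reconstruction, assuming it writes the file at the requested replication on the given NameNode's file system and then waits until that replication is actually reached; the seed value 0L is an assumption:

import java.io.IOException;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen,
    short replicationFactor, int nnIndex)
    throws IOException, InterruptedException, TimeoutException {
  FileSystem fs = cluster.getFileSystem(nnIndex);
  // Write the file, then block until all replicas are reported, so the
  // cluster.triggerHeartbeats() that follows sees the final space usage.
  DFSTestUtil.createFile(fs, filePath, fileLen, replicationFactor, 0L);
  DFSTestUtil.waitReplication(fs, filePath, replicationFactor);
}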