This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.triggerHeartbeat. If you are wondering what DataNodeTestUtils.triggerHeartbeat does, how to call it, or where to find examples of it, the curated code samples below should help. You can also read further about the enclosing class, org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.
The following presents 8 code examples of DataNodeTestUtils.triggerHeartbeat, sorted by popularity by default.
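As a quick orientation before the examples, here is a minimal sketch of the typical pattern, assuming a standard MiniDFSCluster test setup; the single-DataNode cluster and the bare-bones structure are illustrative, not taken from any example below. triggerHeartbeat forces the DataNode to send a heartbeat immediately instead of waiting for the configured heartbeat interval.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

// A minimal sketch, not a real test: start a one-DataNode cluster and
// force an immediate heartbeat so the NameNode sees fresh DN stats.
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1).build();
try {
  cluster.waitActive();
  DataNode dn = cluster.getDataNodes().get(0);
  // Heartbeat now, instead of waiting for the heartbeat interval
  DataNodeTestUtils.triggerHeartbeat(dn);
} finally {
  cluster.shutdown();
}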
Example 1: verifyStats

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the dependent package/class

private void verifyStats(NameNode namenode, FSNamesystem fsn,
    DatanodeInfo info, DataNode node, boolean decommissioning)
    throws InterruptedException, IOException {
  // Do the stats check over 10 heartbeats
  for (int i = 0; i < 10; i++) {
    long[] newStats = namenode.getRpcServer().getStats();
    // For decommissioning nodes, ensure the capacity of the DN is no longer
    // counted. Only the used space of the DN counts toward cluster capacity.
    assertEquals(newStats[0],
        decommissioning ? info.getDfsUsed() : info.getCapacity());
    // Ensure cluster used capacity is counted for both normal and
    // decommissioning nodes
    assertEquals(newStats[1], info.getDfsUsed());
    // For decommissioning nodes, remaining space from the DN is not counted
    assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());
    // Ensure the transceiver count matches that of the DN
    assertEquals(fsn.getTotalLoad(), info.getXceiverCount());
    DataNodeTestUtils.triggerHeartbeat(node);
  }
}
Example 2: verifyStats

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the dependent package/class

private void verifyStats(NameNode namenode, FSNamesystem fsn,
    DatanodeInfo info, DataNode node, boolean decommissioning)
    throws InterruptedException, IOException {
  // Do the stats check over 10 heartbeats
  for (int i = 0; i < 10; i++) {
    long[] newStats = namenode.getRpcServer().getStats();
    // For decommissioning nodes, ensure that neither the capacity nor the
    // dfsUsed of the DN is counted toward the totals
    assertEquals(newStats[0],
        decommissioning ? 0 : info.getCapacity());
    // Ensure cluster used capacity is counted for normal nodes only
    assertEquals(newStats[1], decommissioning ? 0 : info.getDfsUsed());
    // For decommissioning nodes, remaining space from the DN is not counted
    assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());
    // Ensure the transceiver count matches that of the DN
    assertEquals(fsn.getTotalLoad(), info.getXceiverCount());
    DataNodeTestUtils.triggerHeartbeat(node);
  }
}
Example 3: triggerHeartbeats

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the dependent package/class

private void triggerHeartbeats(List<DataNode> datanodes)
    throws IOException, InterruptedException {
  for (DataNode dn : datanodes) {
    DataNodeTestUtils.triggerHeartbeat(dn);
  }
  // Give the NameNode a moment to process the heartbeats
  Thread.sleep(100);
}
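A hypothetical call site for this helper, assuming it lives inside a test class that also holds a MiniDFSCluster field named cluster (both the field name and the restart step are assumptions for illustration, not taken from the example above):

// Hypothetical usage inside the same test class; "cluster" is assumed
// to be a MiniDFSCluster field of that class.
cluster.restartDataNode(0);                  // perturb the cluster
triggerHeartbeats(cluster.getDataNodes());   // force immediate heartbeats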
Example 4: testSetrepIncWithUnderReplicatedBlocks

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the dependent package/class

@Test(timeout=60000) // 1 minute timeout
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final short REPLICATION_FACTOR = 2;
  final String FILE_NAME = "/testFile";
  final Path FILE_PATH = new Path(FILE_NAME);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION_FACTOR + 1).build();
  try {
    // Create a file with one block and a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);

    // Remove one replica from the blocksMap so the block becomes
    // under-replicated, but the block does not get put into the
    // under-replicated blocks queue
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
    DatanodeDescriptor dn = bm.blocksMap.getStorages(b.getLocalBlock())
        .iterator().next().getDatanodeDescriptor();
    bm.addToInvalidates(b.getLocalBlock(), dn);
    // Compute the invalidate work in the NN, and trigger the heartbeat on the DN
    BlockManagerTestUtil.computeAllPendingWork(bm);
    DataNodeTestUtils.triggerHeartbeat(cluster.getDataNode(dn.getIpcPort()));
    // Wait to make sure the DataNode receives the deletion request
    Thread.sleep(5000);
    // Remove the record from blocksMap
    bm.blocksMap.removeNode(b.getLocalBlock(), dn);

    // Increment this file's replication factor
    FsShell shell = new FsShell(conf);
    assertEquals(0, shell.run(new String[] {
        "-setrep", "-w", Integer.toString(1 + REPLICATION_FACTOR), FILE_NAME}));
  } finally {
    cluster.shutdown();
  }
}
Example 5: waitForHeartbeat

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the dependent package/class

private void waitForHeartbeat(final DataNode dn, final DatanodeDescriptor dnd)
    throws Exception {
  final long lastUpdate = dnd.getLastUpdateMonotonic();
  Thread.sleep(1);
  DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
  DataNodeTestUtils.triggerHeartbeat(dn);
  // Poll until the NameNode has observed a newer heartbeat timestamp
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return lastUpdate != dnd.getLastUpdateMonotonic();
    }
  }, 10, 100000);
  DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
}
Example 6: triggerHeartbeats

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the dependent package/class

public void triggerHeartbeats()
    throws IOException {
  for (DataNode dn : getDataNodes()) {
    DataNodeTestUtils.triggerHeartbeat(dn);
  }
}
Example 7: testNoExtraReplicationWhenBlockReceivedIsLate

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the dependent package/class

/**
 * This test makes sure that, when a file is closed before all
 * of the datanodes in the pipeline have reported their replicas,
 * the NameNode doesn't consider the block under-replicated too
 * aggressively. It is a regression test for HDFS-1172.
 */
@Test(timeout=60000)
public void testNoExtraReplicationWhenBlockReceivedIsLate()
    throws Exception {
  LOG.info("Test block replication when blockReceived is late");
  final short numDataNodes = 3;
  final short replication = 3;
  final Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDataNodes).build();
  final String testFile = "/replication-test-file";
  final Path testPath = new Path(testFile);
  final BlockManager bm =
      cluster.getNameNode().getNamesystem().getBlockManager();

  try {
    cluster.waitActive();

    // Artificially delay the IBR from one DataNode. This ensures that the
    // client's completeFile() RPC will get to the NN before some of the
    // replicas are reported.
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy =
        DataNodeTestUtils.spyOnBposToNN(dn, nn);
    DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spy).blockReceivedAndDeleted(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageReceivedDeletedBlocks[]>anyObject());

    FileSystem fs = cluster.getFileSystem();
    // Create and close a small file with two blocks
    DFSTestUtil.createFile(fs, testPath, 1500, replication, 0);

    // Schedule replication via BlockManager#computeReplicationWork
    BlockManagerTestUtil.computeAllPendingWork(bm);

    // Initially there should be some pending replication, since the close()
    // happens before at least one of the reportReceivedDeletedBlocks calls
    assertTrue(pendingReplicationCount(bm) > 0);

    // Release the pending IBR
    delayer.waitForCall();
    delayer.proceed();
    delayer.waitForResult();

    // Make sure the DataNodes do any replication work that exists
    for (DataNode d : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(d);
    }

    // Wait until there is nothing pending
    try {
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          return pendingReplicationCount(bm) == 0;
        }
      }, 100, 3000);
    } catch (TimeoutException e) {
      fail("timed out while waiting for no pending replication.");
    }

    // Check that none of the datanodes have serviced a replication request,
    // i.e. that the NameNode didn't schedule any spurious replication
    assertNoReplicationWasPerformed(cluster);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 8: triggerHeartbeats

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the dependent package/class

public void triggerHeartbeats() throws IOException {
  for (DataNode dn : getDataNodes()) {
    DataNodeTestUtils.triggerHeartbeat(dn);
  }
}