当前位置: 首页>>代码示例>>Java>>正文


Java DataNodeTestUtils.triggerHeartbeat方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.triggerHeartbeat方法的典型用法代码示例。如果您正苦于以下问题:Java DataNodeTestUtils.triggerHeartbeat方法的具体用法?Java DataNodeTestUtils.triggerHeartbeat怎么用?Java DataNodeTestUtils.triggerHeartbeat使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils的用法示例。


在下文中一共展示了DataNodeTestUtils.triggerHeartbeat方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: verifyStats

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; //导入方法依赖的package包/类
/**
 * Checks NameNode-reported cluster stats against a single DataNode's info
 * over ten consecutive heartbeats.
 *
 * @param namenode the NameNode whose RPC stats are queried
 * @param fsn the namesystem, used for the aggregate xceiver (load) count
 * @param info the DatanodeInfo snapshot for the DN under test
 * @param node the live DataNode, heartbeat-triggered each iteration
 * @param decommissioning whether the DN is currently decommissioning
 * @throws InterruptedException if the heartbeat trigger is interrupted
 * @throws IOException on RPC failure
 */
private void verifyStats(NameNode namenode, FSNamesystem fsn,
    DatanodeInfo info, DataNode node, boolean decommissioning)
    throws InterruptedException, IOException {
  // Do the stats check over 10 heartbeats
  for (int i = 0; i < 10; i++) {
    long[] newStats = namenode.getRpcServer().getStats();

    // For decommissioning nodes, ensure capacity of the DN is no longer
    // counted. Only used space of the DN is counted in cluster capacity.
    // Fixed: JUnit assertEquals takes (expected, actual) — the original
    // passed them reversed, producing misleading failure messages.
    assertEquals(decommissioning ? info.getDfsUsed() : info.getCapacity(),
        newStats[0]);

    // Ensure cluster used capacity is counted for both normal and
    // decommissioning nodes
    assertEquals(info.getDfsUsed(), newStats[1]);

    // For decommissioning nodes, remaining space from the DN is not counted
    assertEquals(decommissioning ? 0 : info.getRemaining(), newStats[2]);

    // Ensure transceiver count is same as that DN
    assertEquals(info.getXceiverCount(), fsn.getTotalLoad());
    // Force an immediate heartbeat so the next iteration sees fresh stats
    DataNodeTestUtils.triggerHeartbeat(node);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:25,代码来源:TestDecommission.java

示例2: verifyStats

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; //导入方法依赖的package包/类
/**
 * Checks NameNode-reported cluster stats against a single DataNode's info
 * over ten consecutive heartbeats. In this variant a decommissioning DN
 * contributes neither capacity nor used space to the cluster totals.
 *
 * @param namenode the NameNode whose RPC stats are queried
 * @param fsn the namesystem, used for the aggregate xceiver (load) count
 * @param info the DatanodeInfo snapshot for the DN under test
 * @param node the live DataNode, heartbeat-triggered each iteration
 * @param decommissioning whether the DN is currently decommissioning
 * @throws InterruptedException if the heartbeat trigger is interrupted
 * @throws IOException on RPC failure
 */
private void verifyStats(NameNode namenode, FSNamesystem fsn,
    DatanodeInfo info, DataNode node, boolean decommissioning)
    throws InterruptedException, IOException {
  // Do the stats check over 10 heartbeats
  for (int i = 0; i < 10; i++) {
    long[] newStats = namenode.getRpcServer().getStats();

    // For decommissioning nodes, ensure capacity of the DN and dfsUsed
    // is no longer counted towards total.
    // Fixed: JUnit assertEquals takes (expected, actual) — the original
    // passed them reversed, producing misleading failure messages.
    assertEquals(decommissioning ? 0 : info.getCapacity(), newStats[0]);

    // Ensure cluster used capacity is counted for normal nodes only
    assertEquals(decommissioning ? 0 : info.getDfsUsed(), newStats[1]);

    // For decommissioning nodes, remaining space from the DN is not counted
    assertEquals(decommissioning ? 0 : info.getRemaining(), newStats[2]);

    // Ensure transceiver count is same as that DN
    assertEquals(info.getXceiverCount(), fsn.getTotalLoad());
    // Force an immediate heartbeat so the next iteration sees fresh stats
    DataNodeTestUtils.triggerHeartbeat(node);
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:24,代码来源:TestDecommission.java

示例3: triggerHeartbeats

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; //导入方法依赖的package包/类
/**
 * Fires an immediate heartbeat from each of the given DataNodes, then
 * pauses briefly so the NameNode has a chance to process them.
 *
 * @param datanodes the DataNodes to heartbeat
 * @throws IOException if triggering a heartbeat fails
 * @throws InterruptedException if the settling sleep is interrupted
 */
private void triggerHeartbeats(List<DataNode> datanodes)
    throws IOException, InterruptedException {
  for (int i = 0; i < datanodes.size(); i++) {
    DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
  }
  // Give the NameNode a moment to absorb the heartbeats.
  Thread.sleep(100);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:8,代码来源:TestNamenodeCapacityReport.java

示例4: testSetrepIncWithUnderReplicatedBlocks

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; //导入方法依赖的package包/类
@Test(timeout=60000) // 1 min timeout
// Regression test: raising the replication factor with -setrep -w must still
// complete even when a block is under-replicated but absent from the
// under-replicated blocks queue (replica removed directly from blocksMap).
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final short REPLICATION_FACTOR = 2;
  final String FILE_NAME = "/testFile";
  final Path FILE_PATH = new Path(FILE_NAME);
  // One spare DN beyond the initial replication factor so the later
  // -setrep to REPLICATION_FACTOR+1 has somewhere to place a new replica.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR + 1).build();
  try {
    // create a file with one block with a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
    
    // remove one replica from the blocksMap so block becomes under-replicated
    // but the block does not get put into the under-replicated blocks queue
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
    // Pick an arbitrary DN currently holding a replica of the block.
    DatanodeDescriptor dn = bm.blocksMap.getStorages(b.getLocalBlock())
        .iterator().next().getDatanodeDescriptor();
    bm.addToInvalidates(b.getLocalBlock(), dn);
    // Compute the invalidate work in NN, and trigger the heartbeat from DN
    BlockManagerTestUtil.computeAllPendingWork(bm);
    DataNodeTestUtils.triggerHeartbeat(cluster.getDataNode(dn.getIpcPort()));
    // Wait to make sure the DataNode receives the deletion request 
    Thread.sleep(5000);
    // Remove the record from blocksMap
    bm.blocksMap.removeNode(b.getLocalBlock(), dn);
    
    // increment this file's replication factor
    // -w makes the shell wait until replication completes; exit code 0
    // proves the NameNode eventually re-replicated the block.
    FsShell shell = new FsShell(conf);
    assertEquals(0, shell.run(new String[]{
        "-setrep", "-w", Integer.toString(1+REPLICATION_FACTOR), FILE_NAME}));
  } finally {
    cluster.shutdown();
  }
  
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:38,代码来源:TestUnderReplicatedBlocks.java

示例5: waitForHeartbeat

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; //导入方法依赖的package包/类
/**
 * Temporarily re-enables heartbeats on the given DataNode, fires one, and
 * blocks until the NameNode-side descriptor records a newer update time.
 * Heartbeats are disabled again before returning.
 *
 * @param dn the DataNode to heartbeat
 * @param dnd the NameNode-side descriptor to watch for the update
 * @throws Exception if the wait times out or triggering fails
 */
private void waitForHeartbeat(final DataNode dn, final DatanodeDescriptor dnd)
    throws Exception {
  // Snapshot the current update time so a change can be detected.
  final long previousUpdate = dnd.getLastUpdateMonotonic();
  // Ensure the monotonic clock can advance past the snapshot.
  Thread.sleep(1);
  DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
  DataNodeTestUtils.triggerHeartbeat(dn);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      // True once the NameNode has processed the fresh heartbeat.
      return dnd.getLastUpdateMonotonic() != previousUpdate;
    }
  }, 10, 100000);
  // Restore the disabled state expected by the surrounding test.
  DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:15,代码来源:TestDatanodeRegistration.java

示例6: triggerHeartbeats

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; //导入方法依赖的package包/类
/**
 * Triggers an immediate heartbeat from every DataNode in this cluster.
 *
 * @throws IOException if triggering any heartbeat fails
 */
public void triggerHeartbeats()
    throws IOException {
  for (DataNode datanode : getDataNodes()) {
    DataNodeTestUtils.triggerHeartbeat(datanode);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:7,代码来源:MiniDFSCluster.java

示例7: testNoExtraReplicationWhenBlockReceivedIsLate

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; //导入方法依赖的package包/类
/**
 * This test makes sure that, when a file is closed before all
 * of the datanodes in the pipeline have reported their replicas,
 * the NameNode doesn't consider the block under-replicated too
 * aggressively. It is a regression test for HDFS-1172.
 */
@Test(timeout=60000)
public void testNoExtraReplicationWhenBlockReceivedIsLate()
    throws Exception {
  LOG.info("Test block replication when blockReceived is late" );
  final short numDataNodes = 3;
  final short replication = 3;
  final Configuration conf = new Configuration();
      conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDataNodes).build();
  final String testFile = "/replication-test-file";
  final Path testPath = new Path(testFile);
  final BlockManager bm =
      cluster.getNameNode().getNamesystem().getBlockManager();

  try {
    cluster.waitActive();

    // Artificially delay IBR (incremental block report) from 1 DataNode.
    // this ensures that the client's completeFile() RPC will get to the
    // NN before some of the replicas are reported.
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    // Spy on the DN->NN protocol so blockReceivedAndDeleted can be held
    // back by the DelayAnswer until explicitly released below.
    DatanodeProtocolClientSideTranslatorPB spy =
        DataNodeTestUtils.spyOnBposToNN(dn, nn);
    DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spy).blockReceivedAndDeleted(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageReceivedDeletedBlocks[]>anyObject());

    FileSystem fs = cluster.getFileSystem();
    // Create and close a small file with two blocks
    DFSTestUtil.createFile(fs, testPath, 1500, replication, 0);

    // schedule replication via BlockManager#computeReplicationWork
    BlockManagerTestUtil.computeAllPendingWork(bm);

    // Initially, should have some pending replication since the close()
    // is earlier than at least one of the reportReceivedDeletedBlocks calls
    assertTrue(pendingReplicationCount(bm) > 0);

    // release pending IBR so the delayed DN's replicas finally reach the NN.
    delayer.waitForCall();
    delayer.proceed();
    delayer.waitForResult();

    // make sure DataNodes do replication work if exists
    for (DataNode d : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(d);
    }

    // Wait until there is nothing pending
    try {
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          return pendingReplicationCount(bm) == 0;
        }
      }, 100, 3000);
    } catch (TimeoutException e) {
      fail("timed out while waiting for no pending replication.");
    }

    // Check that none of the datanodes have serviced a replication request.
    // i.e. that the NameNode didn't schedule any spurious replication.
    assertNoReplicationWasPerformed(cluster);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:80,代码来源:TestReplication.java

示例8: triggerHeartbeats

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; //导入方法依赖的package包/类
/**
 * Asks each DataNode in this cluster to send a heartbeat right away.
 *
 * @throws IOException if triggering any heartbeat fails
 */
public void triggerHeartbeats() throws IOException {
  for (DataNode node : getDataNodes()) {
    DataNodeTestUtils.triggerHeartbeat(node);
  }
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:6,代码来源:MiniDFSCluster.java


注:本文中的org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.triggerHeartbeat方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。