

Java DataNode.getDatanodeId Method Code Examples

This article collects representative usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNode.getDatanodeId. If you are unsure what DataNode.getDatanodeId does or how to call it, the curated examples below should help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.hdfs.server.datanode.DataNode.


Four code examples of the DataNode.getDatanodeId method are shown below, ordered by popularity.
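Before working through the examples, here is a minimal sketch of the basic call pattern: bring up a MiniDFSCluster, take a DataNode from it, and read the node's identity via getDatanodeId(). This sketch is not taken from any of the projects below; the class name GetDatanodeIdSketch is a placeholder of our own, though getHostName(), getXferAddr(), and getDatanodeUuid() are standard DatanodeID accessors.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class GetDatanodeIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      final DataNode dn = cluster.getDataNodes().get(0);

      // getDatanodeId() returns the DatanodeID under which this DataNode
      // registered with the NameNode (hostname, ports, datanode UUID).
      final DatanodeID dnId = dn.getDatanodeId();
      System.out.println("hostname      = " + dnId.getHostName());
      System.out.println("transfer addr = " + dnId.getXferAddr());
      System.out.println("datanode uuid = " + dnId.getDatanodeUuid());
    } finally {
      cluster.shutdown();
    }
  }
}

As in the examples below, the DatanodeID obtained this way is typically handed to NameNode-side components, e.g. DatanodeManager.getDatanode(dnId), to look up the corresponding DatanodeDescriptor.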

Example 1: runTest

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class this method depends on
private static void runTest(final String testCaseName,
                            final boolean createFiles,
                            final int numInitialStorages,
                            final int expectedStoragesAfterTest) throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster
        .Builder(conf)
        .numDataNodes(1)
        .storagesPerDatanode(numInitialStorages)
        .build();
    cluster.waitActive();

    final DataNode dn0 = cluster.getDataNodes().get(0);

    // Ensure NN knows about the storage.
    final DatanodeID dnId = dn0.getDatanodeId();
    final DatanodeDescriptor dnDescriptor =
        cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dnId);
    assertThat(dnDescriptor.getStorageInfos().length, is(numInitialStorages));

    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final DatanodeRegistration dnReg = dn0.getDNRegistrationForBP(bpid);
    DataNodeTestUtils.triggerBlockReport(dn0);

    if (createFiles) {
      final Path path = new Path("/", testCaseName);
      DFSTestUtil.createFile(
          cluster.getFileSystem(), path, 1024, (short) 1, 0x1BAD5EED);
      DataNodeTestUtils.triggerBlockReport(dn0);
    }

    // Generate a fake StorageReport that is missing one storage.
    final StorageReport reports[] =
        dn0.getFSDataset().getStorageReports(bpid);
    final StorageReport prunedReports[] = new StorageReport[numInitialStorages - 1];
    System.arraycopy(reports, 0, prunedReports, 0, prunedReports.length);

    // Stop the DataNode and send fake heartbeat with missing storage.
    cluster.stopDataNode(0);
    cluster.getNameNodeRpc().sendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0,
        0, null);

    // Check that the missing storage was pruned.
    assertThat(dnDescriptor.getStorageInfos().length, is(expectedStoragesAfterTest));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 54, Source: TestNameNodePrunesMissingStorages.java

Example 2: testReplDueToNodeFailRespectsRackPolicy

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class this method depends on
@Test
public void testReplDueToNodeFailRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  // Last datanode is on a different rack
  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();

  try {
    // Create a file with one block, with a replication factor of 3
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Make the last datanode look like it failed to heartbeat by 
    // calling removeDatanode and stopping it.
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    int idx = datanodes.size() - 1;
    DataNode dataNode = datanodes.get(idx);
    DatanodeID dnId = dataNode.getDatanodeId();
    cluster.stopDataNode(idx);
    dm.removeDatanode(dnId);

    // The block should still have sufficient # replicas, across racks.
    // The last node may not have contained a replica, but if it did
    // it should have been replicated within the same rack.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    
    // Fail the new last datanode; it is also on rack2, so only
    // one rack remains for all the replicas
    datanodes = cluster.getDataNodes();
    idx = datanodes.size() - 1;
    dataNode = datanodes.get(idx);
    dnId = dataNode.getDatanodeId();
    cluster.stopDataNode(idx);
    dm.removeDatanode(dnId);

    // Make sure we have enough live replicas even though we are
    // short one rack and therefore still need one more replica
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 50, Source: TestBlocksWithNotEnoughRacks.java

Example 3: testReduceReplFactorDueToRejoinRespectsRackPolicy

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class this method depends on
@Test
public void testReduceReplFactorDueToRejoinRespectsRackPolicy() 
    throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");
  // Last datanode is on a different rack
  String racks[] = {"/rack1", "/rack1", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Make the last (cross rack) datanode look like it failed
    // to heartbeat by stopping it and calling removeDatanode.
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(3, datanodes.size());
    DataNode dataNode = datanodes.get(2);
    DatanodeID dnId = dataNode.getDatanodeId();
    cluster.stopDataNode(2);
    dm.removeDatanode(dnId);

    // The block gets re-replicated to another datanode so it has a 
    // sufficient # replicas, but not across racks, so there should
    // be 1 rack, and 1 needed replica (even though there are 2 hosts 
    // available and only 2 replicas required).
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);

    // Start the "failed" datanode, which has a replica so the block is
    // now over-replicated and therefore a replica should be removed but
    // not on the restarted datanode as that would violate the rack policy.
    String rack2[] = {"/rack2"};
    cluster.startDataNodes(conf, 1, true, null, rack2);
    cluster.waitActive();      
    
    // The block now has sufficient # replicas, across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: TestBlocksWithNotEnoughRacks.java

Example 4: runTest

import org.apache.hadoop.hdfs.server.datanode.DataNode; // import the package/class this method depends on
private static void runTest(final String testCaseName,
                            final boolean createFiles,
                            final int numInitialStorages,
                            final int expectedStoragesAfterTest) throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster
        .Builder(conf)
        .numDataNodes(1)
        .storagesPerDatanode(numInitialStorages)
        .build();
    cluster.waitActive();

    final DataNode dn0 = cluster.getDataNodes().get(0);

    // Ensure NN knows about the storage.
    final DatanodeID dnId = dn0.getDatanodeId();
    final DatanodeDescriptor dnDescriptor =
        cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dnId);
    assertThat(dnDescriptor.getStorageInfos().length, is(numInitialStorages));

    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final DatanodeRegistration dnReg = dn0.getDNRegistrationForBP(bpid);
    DataNodeTestUtils.triggerBlockReport(dn0);

    if (createFiles) {
      final Path path = new Path("/", testCaseName);
      DFSTestUtil.createFile(
          cluster.getFileSystem(), path, 1024, (short) 1, 0x1BAD5EED);
      DataNodeTestUtils.triggerBlockReport(dn0);
    }

    // Generate a fake StorageReport that is missing one storage.
    final StorageReport reports[] =
        dn0.getFSDataset().getStorageReports(bpid);
    final StorageReport prunedReports[] = new StorageReport[numInitialStorages - 1];
    System.arraycopy(reports, 0, prunedReports, 0, prunedReports.length);

    // Stop the DataNode and send fake heartbeat with missing storage.
    cluster.stopDataNode(0);
    cluster.getNameNodeRpc().sendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0,
        0, null, true);

    // Check that the missing storage was pruned.
    assertThat(dnDescriptor.getStorageInfos().length, is(expectedStoragesAfterTest));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 54, Source: TestNameNodePrunesMissingStorages.java
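Note that Example 4 is nearly identical to Example 1 but comes from a codebase based on a newer Hadoop line, in which DatanodeProtocol.sendHeartbeat takes an extra trailing boolean (the requestFullBlockReportLease flag introduced by HDFS-7923). That is why its heartbeat call passes true as a final argument while Example 1's older signature does not.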


Note: The org.apache.hadoop.hdfs.server.datanode.DataNode.getDatanodeId examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's License before redistributing or using the code, and do not reproduce this article without permission.