

Java DatanodeRegistration.getDatanodeUuid Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration.getDatanodeUuid. If you are wondering what DatanodeRegistration.getDatanodeUuid does, how to use it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration, the class this method belongs to.


The following presents 5 code examples of the DatanodeRegistration.getDatanodeUuid method, ordered by popularity by default.
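
Before diving in, here is a minimal sketch of the check most of the examples below perform: comparing the UUID that a NameNode returns in a DatanodeRegistration against the UUID recorded in local storage. The UuidConsistencyCheck class and its storageUuid parameter are illustrative stand-ins (in the real DataNode the value comes from DataStorage.getDatanodeUuid()), not part of the Hadoop API.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

public class UuidConsistencyCheck {
  // storageUuid stands in for the UUID persisted in local storage.
  static void verifyUuid(String storageUuid, DatanodeRegistration reg)
      throws IOException {
    // The UUID the NameNode returned must match what local storage recorded;
    // a mismatch means the NameNode is talking about a different node.
    if (!storageUuid.equals(reg.getDatanodeUuid())) {
      throw new IOException("Inconsistent Datanode IDs. Name-node returned "
          + reg.getDatanodeUuid() + ". Expecting " + storageUuid);
    }
  }
}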

Example 1: bpRegistrationSucceeded

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the package/class this method depends on
/**
 * Check that the registration returned from a NameNode is consistent
 * with the information in the storage. If the storage is fresh/unformatted,
 * sets the storage ID based on this registration.
 * Also updates the block pool's state in the secret manager.
 */
synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
    String blockPoolId) throws IOException {
  // Set the ID if we haven't already
  if (null == id) {
    id = bpRegistration;
  }

  if (!storage.getDatanodeUuid().equals(bpRegistration.getDatanodeUuid())) {
    throw new IOException("Inconsistent Datanode IDs. Name-node returned "
        + bpRegistration.getDatanodeUuid()
        + ". Expecting " + storage.getDatanodeUuid());
  }
  
  registerBlockPoolWithSecretManager(bpRegistration, blockPoolId);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: DataNode.java

Example 2: bpRegistrationSucceeded

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the package/class this method depends on
/**
 * Check that the registration returned from a NameNode is consistent
 * with the information in the storage. If the storage is fresh/unformatted,
 * sets the storage ID based on this registration.
 * Also updates the block pool's state in the secret manager.
 */
synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
    String blockPoolId) throws IOException {
  id = bpRegistration;

  if (!storage.getDatanodeUuid().equals(bpRegistration.getDatanodeUuid())) {
    throw new IOException("Inconsistent Datanode IDs. Name-node returned "
        + bpRegistration.getDatanodeUuid()
        + ". Expecting " + storage.getDatanodeUuid());
  }
  
  registerBlockPoolWithSecretManager(bpRegistration, blockPoolId);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source: DataNode.java
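
Note the difference between the two variants: Example 1 assigns id only when it is still null, while Example 2 overwrites it on every successful registration. The caller sketch below is hypothetical (datanode, bpRegistration, blockPoolId, and LOG are assumed to be in scope; the surrounding block-pool service wiring is omitted) and only illustrates how a UUID mismatch surfaces to the caller:

try {
  datanode.bpRegistrationSucceeded(bpRegistration, blockPoolId);
} catch (IOException e) {
  // Thrown when the UUID in the registration does not match local storage;
  // the registration must not be accepted in that case.
  LOG.error("Registration rejected for block pool " + blockPoolId, e);
}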

Example 3: testChooseReplicaToDelete

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the package/class this method depends on
/**
 * Verifies that the replica chosen for deletion resides on the node with
 * the oldest heartbeat, once that heartbeat is older than the tolerable
 * heartbeat interval.
 * The test creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 *
 * The test waits until the tolerable heartbeat interval expires, then reduces
 * the replication of the file. All replica deletions should be scheduled for
 * the last node. No replicas will actually be deleted, since the last DN
 * doesn't send heartbeats.
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
      (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = namesystem.getBlockManager().getDatanodeManager()
          .getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
    } while (monotonicNow() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replicas for deletion should be scheduled on lastDN.
    // And should not actually be deleted, because lastDN does not heartbeat.
    namesystem.readLock();
    Collection<Block> dnBlocks = 
      namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid + " should have been deleted",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for (BlockLocation location : locs) {
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
    }
  } finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 66, Source: TestOverReplicatedBlocks.java
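
Resolving a DataNode's UUID for a given block pool is the recurring first step in these tests. A small helper distilled from the code above (the uuidOf name is mine, not Hadoop's) could read:

static String uuidOf(MiniDFSCluster cluster, int dnIndex) throws IOException {
  DataNode dn = cluster.getDataNodes().get(dnIndex);
  // The registration carries the UUID under which the NameNode tracks this DN.
  DatanodeRegistration reg = DataNodeTestUtils.getDNRegistrationForBP(
      dn, cluster.getNamesystem().getBlockPoolId());
  return reg.getDatanodeUuid();
}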

Example 4: testDeadDatanode

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the package/class this method depends on
/**
 * Test to ensure the namenode rejects requests from a dead datanode:
 * - Start a cluster
 * - Shut down the datanode and wait for it to be marked dead at the namenode
 * - Send datanode requests to the namenode and make sure they are rejected
 *   appropriately.
 */
@Test
public void testDeadDatanode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();

  String poolId = cluster.getNamesystem().getBlockPoolId();
  // wait for datanode to be marked live
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeRegistration reg =
      DataNodeTestUtils.getDNRegistrationForBP(dn, poolId);
    
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);

  // Shutdown and wait for datanode to be marked dead
  dn.shutdown();
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);

  DatanodeProtocol dnp = cluster.getNameNodeRpc();
  
  ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(
      new Block(0), 
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
      null) };
  StorageReceivedDeletedBlocks[] storageBlocks = { 
      new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(), blocks) };

  // Ensure blockReceived call from dead datanode is not rejected with
  // IOException, since it's async, but the node remains unregistered.
  dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  // IBRs are async, make sure the NN processes all of them.
  bm.flushBlockOps();
  assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());

  // Ensure blockReport from dead datanode is rejected with IOException
  StorageBlockReport[] report = { new StorageBlockReport(
      new DatanodeStorage(reg.getDatanodeUuid()),
      BlockListAsLongs.EMPTY) };
  try {
    dnp.blockReport(reg, poolId, report,
        new BlockReportContext(1, 0, System.nanoTime(), 0L));
    fail("Expected IOException is not thrown");
  } catch (IOException ex) {
    // Expected
  }

  // Ensure heartbeat from dead datanode is rejected with a command
  // that asks datanode to register again
  StorageReport[] rep = { new StorageReport(
      new DatanodeStorage(reg.getDatanodeUuid()),
      false, 0, 0, 0, 0) };
  DatanodeCommand[] cmd =
      dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  assertEquals(1, cmd.length);
  assertEquals(RegisterCommand.REGISTER.getAction(), cmd[0].getAction());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 68, Source: TestDeadDatanode.java
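
Example 4 also shows the other recurring use of getDatanodeUuid: naming a DatanodeStorage so that the reports sent over DatanodeProtocol are keyed to the right node. A minimal sketch, assuming the same reg and classes as the test above:

// One empty block report and one all-zero storage report, both keyed by
// the datanode UUID via DatanodeStorage.
StorageBlockReport[] report = { new StorageBlockReport(
    new DatanodeStorage(reg.getDatanodeUuid()), BlockListAsLongs.EMPTY) };
StorageReport[] rep = { new StorageReport(
    new DatanodeStorage(reg.getDatanodeUuid()), false, 0, 0, 0, 0) };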

Example 5: testChooseReplicaToDelete

import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; // import the package/class this method depends on
/**
 * Verifies that the replica chosen for deletion resides on the node with
 * the oldest heartbeat, once that heartbeat is older than the tolerable
 * heartbeat interval.
 * The test creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 *
 * The test waits until the tolerable heartbeat interval expires, then reduces
 * the replication of the file. All replica deletions should be scheduled for
 * the last node. No replicas will actually be deleted, since the last DN
 * doesn't send heartbeats.
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
      (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = namesystem.getBlockManager().getDatanodeManager()
          .getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
    } while (monotonicNow() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replicas for deletion should be scheduled on lastDN.
    // And should not actually be deleted, because lastDN does not heartbeat.
    namesystem.readLock();
    Collection<BlockInfo> dnBlocks =
      namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid + " should have been deleted",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for (BlockLocation location : locs) {
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
    }
  } finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 66, Source: TestOverReplicatedBlocks.java


Note: The org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration.getDatanodeUuid method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Distribution and use must follow the corresponding project's license; do not reproduce without permission.