

Java DataNodeTestUtils.getDNRegistrationForBP Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.getDNRegistrationForBP. If you are wondering what DataNodeTestUtils.getDNRegistrationForBP does, how to call it, or where it is used in practice, the hand-picked code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.


The sections below show 15 code examples of the DataNodeTestUtils.getDNRegistrationForBP method, sorted by popularity by default.
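Before diving into the examples, here is a minimal, self-contained sketch of the pattern most of them share: start a MiniDFSCluster, look up the cluster's block pool id, and resolve a DataNode's registration for that pool via DataNodeTestUtils.getDNRegistrationForBP. This sketch is distilled from the examples below rather than taken from any of the listed projects; the class name GetDNRegistrationSketch is made up for illustration.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.net.NetUtils;

public class GetDNRegistrationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      cluster.waitActive();
      // The block pool id identifies the namespace served by the DataNode.
      String poolId = cluster.getNamesystem().getBlockPoolId();
      DataNode dn = cluster.getDataNodes().get(0);
      // Look up this DataNode's registration for that block pool.
      DatanodeRegistration dnReg =
          DataNodeTestUtils.getDNRegistrationForBP(dn, poolId);
      // The registration exposes the DataNode's transfer address, uuid, etc.
      InetSocketAddress dnAddr =
          NetUtils.createSocketAddr(dnReg.getXferAddr());
      System.out.println("DataNode registered at " + dnAddr
          + " with uuid " + dnReg.getDatanodeUuid());
    } finally {
      cluster.shutdown();
    }
  }
}

Most of the test cases below follow exactly this shape inside a JUnit @Before or @Test method, then use the returned DatanodeRegistration to talk to the NameNode (for example via blockReport or sendHeartbeat) or to match a DataNode by its transfer address.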

Example 1: setup

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
@Before
public void setup() throws Exception {
  conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
      KEEPALIVE_TIMEOUT);
  conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
      0);
  
  cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1).build();
  fs = cluster.getFileSystem();
  dfsClient = ((DistributedFileSystem)fs).dfs;
  dfsClient.peerCache.clear();

  String poolId = cluster.getNamesystem().getBlockPoolId();
  dn = cluster.getDataNodes().get(0);
  DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
      dn, poolId);
  dnAddr = NetUtils.createSocketAddr(dnReg.getXferAddr());
}
 
Contributor: ict-carch, Project: hadoop-plus, Lines of code: 20, Source file: TestDataTransferKeepalive.java

Example 2: setup

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
@Before
public void setup() throws Exception {
  conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
      KEEPALIVE_TIMEOUT);
  conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
      0);
  
  cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1).build();
  fs = cluster.getFileSystem();
  dfsClient = ((DistributedFileSystem)fs).dfs;
  dfsClient.socketCache.clear();

  String poolId = cluster.getNamesystem().getBlockPoolId();
  dn = cluster.getDataNodes().get(0);
  DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
      dn, poolId);
  dnAddr = NetUtils.createSocketAddr(dnReg.getXferAddr());
}
 
Contributor: hopshadoop, Project: hops, Lines of code: 20, Source file: TestDataTransferKeepalive.java

Example 3: testDataNodeRedirect

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
private void testDataNodeRedirect(Path path) throws IOException {
  // Create the file
  if (hdfs.exists(path)) {
    hdfs.delete(path, true);
  }
  FSDataOutputStream out = hdfs.create(path, (short) 1);
  out.writeBytes("0123456789");
  out.close();

  // Get the path's block location so we can determine
  // if we were redirected to the right DN.
  BlockLocation[] locations = hdfs.getFileBlockLocations(path, 0, 10);
  String xferAddr = locations[0].getNames()[0];

  // Connect to the NN to get redirected
  URL u = hftpFs.getNamenodeURL(
      "/data" + ServletUtil.encodePath(path.toUri().getPath()),
      "ugi=userx,groupy");
  HttpURLConnection conn = (HttpURLConnection) u.openConnection();
  HttpURLConnection.setFollowRedirects(true);
  conn.connect();
  conn.getInputStream();

  boolean checked = false;
  // Find the datanode that has the block according to locations
  // and check that the URL was redirected to this DN's info port
  for (DataNode node : cluster.getDataNodes()) {
    DatanodeRegistration dnR = DataNodeTestUtils.getDNRegistrationForBP(node,
        blockPoolId);
    if (dnR.getXferAddr().equals(xferAddr)) {
      checked = true;
      assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
    }
  }
  assertTrue("The test never checked that location of "
      + "the block and hftp desitnation are the same", checked);
}
 
Contributor: naver, Project: hadoop, Lines of code: 38, Source file: TestHftpFileSystem.java

Example 4: testDeadNodeAsBlockTarget

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
@Test
public void testDeadNodeAsBlockTarget() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();

  String poolId = cluster.getNamesystem().getBlockPoolId();
  // wait for datanode to be marked live
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeRegistration reg = DataNodeTestUtils.getDNRegistrationForBP(cluster
      .getDataNodes().get(0), poolId);
  // Get the updated datanode descriptor
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  DatanodeManager dm = bm.getDatanodeManager();
  Node clientNode = dm.getDatanode(reg);

  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true,
      20000);

  // Shutdown and wait for datanode to be marked dead
  dn.shutdown();
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false,
      20000);
  // Get the updated datanode descriptor available in DNM
  // choose the targets, but local node should not get selected as this is not
  // part of the cluster anymore
  DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3,
      clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7,
      false);
  for (DatanodeStorageInfo datanodeStorageInfo : results) {
    assertFalse("Dead node should not be choosen", datanodeStorageInfo
        .getDatanodeDescriptor().equals(clientNode));
  }
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 37, Source file: TestDeadDatanode.java

Example 5: testDataNodeRedirect

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
private void testDataNodeRedirect(Path path) throws IOException {
  // Create the file
  if (hdfs.exists(path)) {
    hdfs.delete(path, true);
  }
  FSDataOutputStream out = hdfs.create(path, (short)1);
  out.writeBytes("0123456789");
  out.close();

  // Get the path's block location so we can determine
  // if we were redirected to the right DN.
  BlockLocation[] locations = 
      hdfs.getFileBlockLocations(path, 0, 10);
  String xferAddr = locations[0].getNames()[0];

  // Connect to the NN to get redirected
  URL u = hftpFs.getNamenodeURL(
      "/data" + ServletUtil.encodePath(path.toUri().getPath()), 
      "ugi=userx,groupy");
  HttpURLConnection conn = (HttpURLConnection)u.openConnection();
  HttpURLConnection.setFollowRedirects(true);
  conn.connect();
  conn.getInputStream();

  boolean checked = false;
  // Find the datanode that has the block according to locations
  // and check that the URL was redirected to this DN's info port
  for (DataNode node : cluster.getDataNodes()) {
    DatanodeRegistration dnR = 
      DataNodeTestUtils.getDNRegistrationForBP(node, blockPoolId);
    if (dnR.getXferAddr().equals(xferAddr)) {
      checked = true;
      assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
    }
  }
  assertTrue("The test never checked that location of " +
             "the block and hftp desitnation are the same", checked);
}
 
Contributor: ict-carch, Project: hadoop-plus, Lines of code: 39, Source file: TestHftpFileSystem.java

Example 6: testDataNodeRedirect

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
private void testDataNodeRedirect(Path path) throws IOException {
  // Create the file
  if (hdfs.exists(path)) {
    hdfs.delete(path, true);
  }
  FSDataOutputStream out = hdfs.create(path, (short) 1);
  out.writeBytes("0123456789");
  out.close();

  // Get the path's block location so we can determine
  // if we were redirected to the right DN.
  BlockLocation[] locations = hdfs.getFileBlockLocations(path, 0, 10);
  String xferAddr = locations[0].getNames()[0];

  // Connect to the NN to get redirected
  URL u = hftpFs.getNamenodeURL(
      "/data" + ServletUtil.encodePath(path.toUri().getPath()),
      "ugi=userx,groupy");
  HttpURLConnection conn = (HttpURLConnection) u.openConnection();
  HttpURLConnection.setFollowRedirects(true);
  conn.connect();
  conn.getInputStream();

  boolean checked = false;
  // Find the datanode that has the block according to locations
  // and check that the URL was redirected to this DN's info port
  for (DataNode node : cluster.getDataNodes()) {
    DatanodeRegistration dnR =
        DataNodeTestUtils.getDNRegistrationForBP(node, blockPoolId);
    if (dnR.getXferAddr().equals(xferAddr)) {
      checked = true;
      assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
    }
  }
  assertTrue("The test never checked that location of " +
      "the block and hftp desitnation are the same", checked);
}
 
Contributor: hopshadoop, Project: hops, Lines of code: 38, Source file: TestHftpFileSystem.java

Example 7: testArrayOutOfBoundsException

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/** Test the case that a replica is reported corrupt while it is not
 * in blocksMap. Make sure that ArrayIndexOutOfBounds is not thrown.
 * See Hadoop-4351.
 */
@Test
public void testArrayOutOfBoundsException() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    
    FileSystem fs = cluster.getFileSystem();
    final Path FILE_PATH = new Path("/tmp.txt");
    final long FILE_LEN = 1L;
    DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short)2, 1L);
    
    // get the block
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = cluster.getInstanceStorageDir(0, 0);
    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    assertTrue("Data directory does not exist", dataDir.exists());
    ExtendedBlock blk = getBlock(bpid, dataDir);
    if (blk == null) {
      storageDir = cluster.getInstanceStorageDir(0, 1);
      dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      blk = getBlock(bpid, dataDir);
    }
    assertFalse("Data directory does not contain any blocks or there was an "
        + "IO error", blk==null);

    // start a third datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 3);
    DataNode dataNode = datanodes.get(2);
    
    // report corrupted block by the third datanode
    DatanodeRegistration dnR = 
      DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
    FSNamesystem ns = cluster.getNamesystem();
    ns.writeLock();
    try {
      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
          blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
    } finally {
      ns.writeUnlock();
    }
    
    // open the file
    fs.open(FILE_PATH);
    
    //clean up
    fs.delete(FILE_PATH, false);
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
  
}
 
Contributor: naver, Project: hadoop, Lines of code: 60, Source file: TestFileCorruption.java

Example 8: testProcesOverReplicateBlock

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/** Test processOverReplicatedBlock can handle corrupt replicas fine.
 * It makes sure that corrupt replicas are not treated as valid ones,
 * which would otherwise let the NN delete valid replicas while keeping
 * corrupt ones.
 */
@Test
public void testProcesOverReplicateBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short)3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)3);
    
    // corrupt the block on datanode 0
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    assertTrue(cluster.corruptReplica(0, block));
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    // remove block scanner log to trigger block scanning
    File scanCursor = new File(new File(MiniDFSCluster.getFinalizedDir(
        cluster.getInstanceStorageDir(0, 0),
        cluster.getNamesystem().getBlockPoolId()).getParent()).getParent(),
        "scanner.cursor");
    //wait for one minute for deletion to succeed;
    for(int i = 0; !scanCursor.delete(); i++) {
      assertTrue("Could not delete " + scanCursor.getAbsolutePath() +
          " in one minute", i < 60);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }
    
    // restart the datanode so the corrupt replica will be detected
    cluster.restartDataNode(dnProps);
    DFSTestUtil.waitReplication(fs, fileName, (short)2);
    
    String blockPoolId = cluster.getNamesystem().getBlockPoolId();
    final DatanodeID corruptDataNode = 
      DataNodeTestUtils.getDNRegistrationForBP(
          cluster.getDataNodes().get(2), blockPoolId);
       
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
    try {
      namesystem.writeLock();
      synchronized(hm) {
        // set live datanode's remaining space to be 0 
        // so they will be chosen to be deleted when over-replication occurs
        String corruptMachineName = corruptDataNode.getXferAddr();
        for (DatanodeDescriptor datanode : hm.getDatanodes()) {
          if (!corruptMachineName.equals(datanode.getXferAddr())) {
            datanode.getStorageInfos()[0].setUtilizationForTesting(100L, 100L, 0, 100L);
            datanode.updateHeartbeat(
                BlockManagerTestUtil.getStorageReportsForDatanode(datanode),
                0L, 0L, 0, 0, null);
          }
        }

        // decrease the replication factor to 1; 
        NameNodeAdapter.setReplication(namesystem, fileName.toString(), (short)1);

        // corrupt one won't be chosen to be excess one
        // without 4910 the number of live replicas would be 0: block gets lost
        assertEquals(1, bm.countNodes(block.getLocalBlock()).liveReplicas());
      }
    } finally {
      namesystem.writeUnlock();
    }
    
  } finally {
    cluster.shutdown();
  }
}
 
Contributor: naver, Project: hadoop, Lines of code: 82, Source file: TestOverReplicatedBlocks.java

Example 9: testChooseReplicaToDelete

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/**
 * The test verifies that the replica chosen for deletion lives on the node
 * with the oldest heartbeat, when that heartbeat is older than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 * 
 * Test waits until the tolerable heartbeat interval expires, and reduces
 * replication of the file. All replica deletions should be scheduled for the
 * last node. No replicas will actually be deleted, since last DN doesn't
 * send heartbeats. 
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
      (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = namesystem.getBlockManager().getDatanodeManager()
          .getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
    } while (monotonicNow() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replicas for deletion should be scheduled on lastDN.
    // And should not actually be deleted, because lastDN does not heartbeat.
    namesystem.readLock();
    Collection<Block> dnBlocks = 
      namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid + " should have been deleted",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for(BlockLocation location : locs)
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
  } finally {
    if(fs != null) fs.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Contributor: naver, Project: hadoop, Lines of code: 66, Source file: TestOverReplicatedBlocks.java

Example 10: testArrayOutOfBoundsException

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/** Test the case that a replica is reported corrupt while it is not
 * in blocksMap. Make sure that ArrayIndexOutOfBounds is not thrown.
 * See Hadoop-4351.
 */
@Test
public void testArrayOutOfBoundsException() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    
    FileSystem fs = cluster.getFileSystem();
    final Path FILE_PATH = new Path("/tmp.txt");
    final long FILE_LEN = 1L;
    DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short)2, 1L);
    
    // get the block
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
    assertFalse("Data directory does not contain any blocks or there was an "
        + "IO error", blk==null);

    // start a third datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 3);
    DataNode dataNode = datanodes.get(2);
    
    // report corrupted block by the third datanode
    DatanodeRegistration dnR = 
      DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
    FSNamesystem ns = cluster.getNamesystem();
    ns.writeLock();
    try {
      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
          blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
    } finally {
      ns.writeUnlock();
    }
    
    // open the file
    fs.open(FILE_PATH);
    
    //clean up
    fs.delete(FILE_PATH, false);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 53, Source file: TestFileCorruption.java

Example 11: testDeadDatanode

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/**
 * Test to ensure the namenode rejects requests from a dead datanode:
 * - Start a cluster
 * - Shutdown the datanode and wait for it to be marked dead at the namenode
 * - Send datanode requests to the Namenode and make sure they are rejected
 *   appropriately.
 */
@Test
public void testDeadDatanode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();

  String poolId = cluster.getNamesystem().getBlockPoolId();
  // wait for datanode to be marked live
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeRegistration reg = 
    DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);

  // Shutdown and wait for datanode to be marked dead
  dn.shutdown();
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);

  DatanodeProtocol dnp = cluster.getNameNodeRpc();
  
  ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(
      new Block(0), 
      ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
      null) };
  StorageReceivedDeletedBlocks[] storageBlocks = { 
      new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(), blocks) };

  // Ensure blockReceived call from dead datanode is not rejected with
  // IOException, since it's async, but the node remains unregistered.
  dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  // IBRs are async, make sure the NN processes all of them.
  bm.flushBlockOps();
  assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());

  // Ensure blockReport from dead datanode is rejected with IOException
  StorageBlockReport[] report = { new StorageBlockReport(
      new DatanodeStorage(reg.getDatanodeUuid()),
      BlockListAsLongs.EMPTY) };
  try {
    dnp.blockReport(reg, poolId, report,
        new BlockReportContext(1, 0, System.nanoTime(), 0L));
    fail("Expected IOException is not thrown");
  } catch (IOException ex) {
    // Expected
  }

  // Ensure heartbeat from dead datanode is rejected with a command
  // that asks datanode to register again
  StorageReport[] rep = { new StorageReport(
      new DatanodeStorage(reg.getDatanodeUuid()),
      false, 0, 0, 0, 0) };
  DatanodeCommand[] cmd =
      dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  assertEquals(1, cmd.length);
  assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
      .getAction());
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 68, Source file: TestDeadDatanode.java

Example 12: testChooseReplicaToDelete

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/**
 * The test verifies that the replica chosen for deletion lives on the node
 * with the oldest heartbeat, when that heartbeat is older than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 * 
 * Test waits until the tolerable heartbeat interval expires, and reduces
 * replication of the file. All replica deletions should be scheduled for the
 * last node. No replicas will actually be deleted, since last DN doesn't
 * send heartbeats. 
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
      (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = namesystem.getBlockManager().getDatanodeManager()
          .getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
    } while (monotonicNow() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replicas for deletion should be scheduled on lastDN.
    // And should not actually be deleted, because lastDN does not heartbeat.
    namesystem.readLock();
    Collection<BlockInfo> dnBlocks =
      namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid + " should have been deleted",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for(BlockLocation location : locs)
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
  } finally {
    if(fs != null) fs.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 66, Source file: TestOverReplicatedBlocks.java

Example 13: testStorageWithRemainingCapacity

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/**
 * Tests that a namenode doesn't choose a datanode with full disks to 
 * store blocks.
 * @throws Exception
 */
@Test
public void testStorageWithRemainingCapacity() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = FileSystem.get(conf);
  Path file1 = null;
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final String poolId = namesystem.getBlockPoolId();
    final DatanodeRegistration nodeReg = DataNodeTestUtils
        .getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd =
        NameNodeAdapter.getDatanode(namesystem, nodeReg);
    // By default, MiniDFSCluster will create 1 datanode with 2 storages.
    // Assign 64k of remaining capacity to each storage, then try to
    // create a file of 100k.
    for (DatanodeStorageInfo storage : dd.getStorageInfos()) {
      storage.setUtilizationForTesting(65536, 0, 65536, 0);
    }
    // sum of the remaining capacity of both storages
    dd.setRemaining(131072);
    file1 = new Path("testRemainingStorage.dat");
    try {
      DFSTestUtil.createFile(fs, file1, 102400, 102400, 102400, (short) 1,
          0x1BAD5EED);
    } catch (RemoteException re) {
      GenericTestUtils.assertExceptionContains("nodes instead of "
          + "minReplication", re);
    }
  }
  finally {
    // Clean up
    assertTrue(fs.exists(file1));
    fs.delete(file1, true);
    assertTrue(!fs.exists(file1));
    cluster.shutdown();
  }
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 47, Source file: TestBlockManager.java

Example 14: testChooseReplicaToDelete

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/**
 * The test verifies that the replica chosen for deletion lives on the node
 * with the oldest heartbeat, when that heartbeat is older than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 * 
 * Test waits until the tolerable heartbeat interval expires, and reduces
 * replication of the file. All replica deletions should be scheduled for the
 * last node. No replicas will actually be deleted, since last DN doesn't
 * send heartbeats. 
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
      (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = 
        namesystem.getBlockManager().getDatanodeManager().getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdate();
    } while(now() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replicas for deletion should be scheduled on lastDN.
    // And should not actually be deleted, because lastDN does not heartbeat.
    namesystem.readLock();
    Collection<Block> dnBlocks = 
      namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid + " should have been deleted",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for(BlockLocation location : locs)
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
  } finally {
    if(fs != null) fs.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 66, Source file: TestOverReplicatedBlocks.java

Example 15: testArrayOutOfBoundsException

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the package/class the method depends on
/** Test the case that a replica is reported corrupt while it is not
 * in blocksMap. Make sure that ArrayIndexOutOfBounds is not thrown.
 * See Hadoop-4351.
 */
@Test
public void testArrayOutOfBoundsException() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    
    FileSystem fs = cluster.getFileSystem();
    final Path FILE_PATH = new Path("/tmp.txt");
    final long FILE_LEN = 1L;
    DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short)2, 1L);
    
    // get the block
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = cluster.getInstanceStorageDir(0, 0);
    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    ExtendedBlock blk = getBlock(bpid, dataDir);
    if (blk == null) {
      storageDir = cluster.getInstanceStorageDir(0, 1);
      dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      blk = getBlock(bpid, dataDir);
    }
    assertFalse(blk==null);

    // start a third datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 3);
    DataNode dataNode = datanodes.get(2);
    
    // report corrupted block by the third datanode
    DatanodeRegistration dnR = 
      DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
    FSNamesystem ns = cluster.getNamesystem();
    ns.writeLock();
    try {
      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
          blk, new DatanodeInfo(dnR), "TEST");
    } finally {
      ns.writeUnlock();
    }
    
    // open the file
    fs.open(FILE_PATH);
    
    //clean up
    fs.delete(FILE_PATH, false);
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
  
}
 
Contributor: ict-carch, Project: hadoop-plus, Lines of code: 58, Source file: TestFileCorruption.java


Note: The org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.getDNRegistrationForBP examples in this article were compiled from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from projects contributed by various open-source developers; copyright remains with the original authors, and distribution and use are subject to the corresponding project licenses. Do not reproduce without permission.