Java MiniDFSCluster.getNameNode Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.getNameNode. If you are wondering what MiniDFSCluster.getNameNode does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore other usage examples of the org.apache.hadoop.hdfs.MiniDFSCluster class.


The following presents 15 code examples of the MiniDFSCluster.getNameNode method, drawn from open-source projects and sorted by popularity by default.
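Before the collected examples, here is a minimal sketch of the pattern nearly all of them share: build a MiniDFSCluster, wait for it to become active, fetch the NameNode with getNameNode(), and shut the cluster down in a finally block. This is an illustrative sketch, not taken from any project below; the single-DataNode configuration and the printed address are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class GetNameNodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      // Build a single-NameNode, single-DataNode mini cluster and wait
      // until it is fully up and serving requests.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      // getNameNode() returns the cluster's (first) NameNode; in federated
      // or HA topologies, the getNameNode(int index) overload selects a
      // specific one, as several examples below demonstrate.
      NameNode nn = cluster.getNameNode();
      System.out.println("NameNode is up at " + nn.getNameNodeAddress());
    } finally {
      if (cluster != null) {
        cluster.shutdown(); // always release the mini cluster's resources
      }
    }
  }
}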

Example 1: checkNameNodeFiles

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Start the NameNode with the -importCheckpoint option and verify that the files are in separate directories and of the right length
 * @throws IOException
 */
private void checkNameNodeFiles() throws IOException{

  // start namenode with import option
  LOG.info("-- about to start DFS cluster");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(config)
                                .format(false)
                                .manageDataDfsDirs(false)
                                .manageNameDfsDirs(false)
                                .startupOption(IMPORT).build();
    cluster.waitActive();
    LOG.info("--NN started with checkpoint option");
    NameNode nn = cluster.getNameNode();
    assertNotNull(nn);
    // Verify that image file sizes did not change.
    FSImage image = nn.getFSImage();
    verifyDifferentDirs(image, this.fsimageLength, this.editsLength);
  } finally {
    if(cluster != null)
      cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: TestStartup.java

Example 2: testClusterIdMismatch

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testClusterIdMismatch() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .build();
  try {
    cluster.waitActive();

    DataNode dn = cluster.getDataNodes().get(0);
    BPOfferService[] bposs = dn.getAllBpOs();
    LOG.info("dn bpos len (should be 2):" + bposs.length);
    Assert.assertEquals("should've registered with two namenodes", 2, bposs.length);
    
    // add another namenode
    cluster.addNameNode(conf, 9938);
    Thread.sleep(500); // wait for the registration to happen
    bposs = dn.getAllBpOs();
    LOG.info("dn bpos len (should be 3):" + bposs.length);
    Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
    
    // change cluster id and another Namenode
    StartupOption.FORMAT.setClusterId("DifferentCID");
    cluster.addNameNode(conf, 9948);
    NameNode nn4 = cluster.getNameNode(3);
    assertNotNull("cannot create nn4", nn4);

    Thread.sleep(500); // wait for the registration to happen
    bposs = dn.getAllBpOs(); 
    LOG.info("dn bpos len (still should be 3):" + bposs.length);
    Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestDataNodeMultipleRegistrations.java

Example 3: testRaceWhileNNStartup

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Make sure a RetriableException is thrown when rpcServer is null in
 * NamenodeWebHdfsMethods.
 */
@Test
public void testRaceWhileNNStartup() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final NameNode namenode = cluster.getNameNode();
    final NamenodeProtocols rpcServer = namenode.getRpcServer();
    Whitebox.setInternalState(namenode, "rpcServer", null);

    final Path foo = new Path("/foo");
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);
    try {
      webHdfs.mkdirs(foo);
      fail("Expected RetriableException");
    } catch (RetriableException e) {
      GenericTestUtils.assertExceptionContains("Namenode is in startup mode",
          e);
    }
    Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TestWebHDFS.java

Example 4: testNameNodeMultipleSwitchesUsingBKJM

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * NameNode should load the edits correctly if the applicable edits are
 * present in the BKJM.
 */
@Test
public void testNameNodeMultipleSwitchesUsingBKJM() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
        .createJournalURI("/correctEditLogSelection").toString());
    BKJMUtil.addJournalManagerDefinition(conf);

    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
        .manageNameDfsSharedDirs(false).build();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    cluster.waitActive();
    cluster.transitionToActive(0);
    nn1.getRpcServer().rollEditLog(); // Roll edits from the current active.
    // Gracefully transition the current active to standby.
    cluster.transitionToStandby(0);
    // Make the other NN active and roll edits multiple times.
    cluster.transitionToActive(1);
    nn2.getRpcServer().rollEditLog();
    nn2.getRpcServer().rollEditLog();
    // One more failover; NN1 should now be able to fail over successfully.
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: TestBookKeeperAsHASharedDir.java

Example 5: test

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void test() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();

    FSNamesystem fsn = cluster.getNameNode().namesystem;

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");

    String snapshotStats = (String) (mbs.getAttribute(mxbeanName,
        "SnapshotStats"));

    @SuppressWarnings("unchecked")
    Map<String, Object> stat = (Map<String, Object>) JSON
        .parse(snapshotStats);

    assertTrue(stat.containsKey("SnapshottableDirectories")
        && (Long) stat.get("SnapshottableDirectories") == fsn
            .getNumSnapshottableDirs());
    assertTrue(stat.containsKey("Snapshots")
        && (Long) stat.get("Snapshots") == fsn.getNumSnapshots());

    Object pendingDeletionBlocks = mbs.getAttribute(mxbeanName,
      "PendingDeletionBlocks");
    assertNotNull(pendingDeletionBlocks);
    assertTrue(pendingDeletionBlocks instanceof Long);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 39, Source: TestFSNamesystemMBean.java

Example 6: testWithFSNamesystemWriteLock

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testWithFSNamesystemWriteLock() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  FSNamesystem fsn = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();

    fsn = cluster.getNameNode().namesystem;
    fsn.writeLock();

    MBeanClient client = new MBeanClient();
    client.start();
    client.join(20000);
    assertTrue("JMX calls are blocked when FSNamesystem's writerlock" +
        "is owned by another thread", client.succeeded);
    client.interrupt();
  } finally {
    if (fsn != null && fsn.hasWriteLock()) {
      fsn.writeUnlock();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestFSNamesystemMBean.java

Example 7: testCheckpointSignature

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testCheckpointSignature() throws IOException {

  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();

  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();
    NameNode nn = cluster.getNameNode();
    NamenodeProtocols nnRpc = nn.getRpcServer();

    secondary = startSecondaryNameNode(conf);
    // prepare checkpoint image
    secondary.doCheckpoint();
    CheckpointSignature sig = nnRpc.rollEditLog();
    // manipulate the CheckpointSignature fields
    sig.setBlockpoolID("somerandomebpid");
    sig.clusterID = "somerandomcid";
    try {
      sig.validateStorageInfo(nn.getFSImage()); // this should fail
      assertTrue("This test is expected to fail.", false);
    } catch (Exception ignored) {
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TestCheckpoint.java

Example 8: testLastContactTime

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@SuppressWarnings({ "unchecked" })
@Test
public void testLastContactTime() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();

    FSNamesystem fsn = cluster.getNameNode().namesystem;

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
      "Hadoop:service=NameNode,name=NameNodeInfo");

    // Define include file to generate deadNodes metrics
    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path workingDir = localFileSys.getWorkingDirectory();
    Path dir = new Path(workingDir,
      "build/test/data/temp/TestNameNodeMXBean");
    Path includeFile = new Path(dir, "include");
    assertTrue(localFileSys.mkdirs(dir));
    StringBuilder includeHosts = new StringBuilder();
    for(DataNode dn : cluster.getDataNodes()) {
      includeHosts.append(dn.getDisplayName()).append("\n");
    }
    DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
    fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);

    cluster.stopDataNode(0);
    while (fsn.getBlockManager().getDatanodeManager().getNumLiveDataNodes()
      != 2 ) {
      Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
    }

    // get attribute deadnodeinfo
    String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
      "DeadNodes"));
    assertEquals(fsn.getDeadNodes(), deadnodeinfo);
    Map<String, Map<String, Object>> deadNodes =
      (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
    assertTrue(deadNodes.size() > 0);
    for (Map<String, Object> deadNode : deadNodes.values()) {
      assertTrue(deadNode.containsKey("lastContact"));
      assertTrue(deadNode.containsKey("decommissioned"));
      assertTrue(deadNode.containsKey("xferaddr"));
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 59, Source: TestNameNodeMXBean.java

Example 9: testBackupNodeTailsEdits

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Ensure that the backupnode will tail edits from the NN
 * and keep in sync, even while the NN rolls, checkpoints
 * occur, etc.
 */
@Test
public void testBackupNodeTailsEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  HAUtil.setAllowStandbyReads(conf, true);
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  BackupNode backup = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(0).build();
    fileSys = cluster.getFileSystem();
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);
    
    BackupImage bnImage = (BackupImage) backup.getFSImage();
    testBNInSync(cluster, backup, 1);
    
    // Force a roll -- BN should roll with NN.
    NameNode nn = cluster.getNameNode();
    NamenodeProtocols nnRpc = nn.getRpcServer();
    nnRpc.rollEditLog();
    assertEquals(bnImage.getEditLog().getCurSegmentTxId(),
        nn.getFSImage().getEditLog().getCurSegmentTxId());
    
    // BN should stay in sync after roll
    testBNInSync(cluster, backup, 2);
    
    long nnImageBefore =
      nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
    // BN checkpoint
    backup.doCheckpoint();
    
    // NN should have received a new image
    long nnImageAfter =
      nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
    
    assertTrue("nn should have received new checkpoint. before: " +
        nnImageBefore + " after: " + nnImageAfter,
        nnImageAfter > nnImageBefore);

    // BN should stay in sync after checkpoint
    testBNInSync(cluster, backup, 3);

    // Stop BN
    StorageDirectory sd = bnImage.getStorage().getStorageDir(0);
    backup.stop();
    backup = null;
    
    // When shutting down the BN, it shouldn't finalize logs that are
    // still open on the NN
    EditLogFile editsLog = FSImageTestUtil.findLatestEditsLog(sd);
    assertEquals(editsLog.getFirstTxId(),
        nn.getFSImage().getEditLog().getCurSegmentTxId());
    assertTrue("Should not have finalized " + editsLog,
        editsLog.isInProgress());
    
    // do some edits
    assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
    
    // start a new backup node
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);

    testBNInSync(cluster, backup, 4);
    assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down", false));
  } finally {
    LOG.info("Shutting down...");
    if (backup != null) backup.stop();
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
  
  assertStorageDirsMatch(cluster.getNameNode(), backup);
}
 
Developer: naver, Project: hadoop, Lines: 79, Source: TestBackupNode.java

Example 10: testTailer

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testTailer() throws IOException, InterruptedException,
    ServiceFailedException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

  HAUtil.setAllowStandbyReads(conf, true);
  
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(0)
    .build();
  cluster.waitActive();
  
  cluster.transitionToActive(0);
  
  NameNode nn1 = cluster.getNameNode(0);
  NameNode nn2 = cluster.getNameNode(1);
  try {
    for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
      NameNodeAdapter.mkdirs(nn1, getDirPath(i),
          new PermissionStatus("test","test", new FsPermission((short)00755)),
          true);
    }
    
    HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
    
    for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
      assertTrue(NameNodeAdapter.getFileInfo(nn2,
          getDirPath(i), false).isDir());
    }
    
    for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
      NameNodeAdapter.mkdirs(nn1, getDirPath(i),
          new PermissionStatus("test","test", new FsPermission((short)00755)),
          true);
    }
    
    HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
    
    for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
      assertTrue(NameNodeAdapter.getFileInfo(nn2,
          getDirPath(i), false).isDir());
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: TestEditLogTailer.java

Example 11: testStandbyIsHot

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test(timeout=60000)
public void testStandbyIsHot() throws Exception {
  Configuration conf = new Configuration();
  // We read from the standby to watch block locations
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    
    Thread.sleep(1000);
    System.err.println("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    System.err.println("==================================");

    // Block locations should show up on standby.
    LOG.info("Waiting for block locations to appear on standby node");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);

    // Trigger immediate heartbeats and block reports so
    // that the active "trusts" all of the DNs
    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // Change replication
    LOG.info("Changing replication to 1");
    fs.setReplication(TEST_FILE_PATH, (short)1);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    waitForBlockLocations(cluster, nn1, TEST_FILE, 1);

    nn1.getRpcServer().rollEditLog();
    
    LOG.info("Waiting for lowered replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 1);
    
    // Change back to 3
    LOG.info("Changing replication to 3");
    fs.setReplication(TEST_FILE_PATH, (short)3);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());
    nn1.getRpcServer().rollEditLog();
    
    LOG.info("Waiting for higher replication to show up on standby");
    waitForBlockLocations(cluster, nn2, TEST_FILE, 3);
    
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 62, Source: TestStandbyIsHot.java

Example 12: testDatanodeRestarts

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Regression test for HDFS-2795:
 *  - Start an HA cluster with a DN.
 *  - Write several blocks to the FS with replication 1.
 *  - Shutdown the DN
 *  - Wait for the NNs to declare the DN dead. All blocks will be under-replicated.
 *  - Restart the DN.
 * In the bug, the standby node would only very slowly notice the blocks returning
 * to the cluster.
 */
@Test(timeout=60000)
public void testDatanodeRestarts() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  // We read from the standby to watch block locations
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  try {
    NameNode nn0 = cluster.getNameNode(0);
    NameNode nn1 = cluster.getNameNode(1);

    cluster.transitionToActive(0);
    
    // Create 5 blocks.
    DFSTestUtil.createFile(cluster.getFileSystem(0), 
        TEST_FILE_PATH, 5*1024, (short)1, 1L);
    
    HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
    
    // Stop the DN.
    DataNode dn = cluster.getDataNodes().get(0);
    String dnName = dn.getDatanodeId().getXferAddr(); 
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    
    // Make sure both NNs register it as dead.
    BlockManagerTestUtil.noticeDeadDatanode(nn0, dnName);
    BlockManagerTestUtil.noticeDeadDatanode(nn1, dnName);
    
    BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    assertEquals(5, nn0.getNamesystem().getUnderReplicatedBlocks());
    
    // The SBN will not have any blocks in its neededReplication queue
    // since the SBN doesn't process replication.
    assertEquals(0, nn1.getNamesystem().getUnderReplicatedBlocks());
    
    LocatedBlocks locs = nn1.getRpcServer().getBlockLocations(
        TEST_FILE, 0, 1);
    assertEquals("Standby should have registered that the block has no replicas",
        0, locs.get(0).getLocations().length);
    
    cluster.restartDataNode(dnProps);
    // Wait for both NNs to re-register the DN.
    cluster.waitActive(0);
    cluster.waitActive(1);
    
    BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    assertEquals(0, nn0.getNamesystem().getUnderReplicatedBlocks());
    assertEquals(0, nn1.getNamesystem().getUnderReplicatedBlocks());
    
    locs = nn1.getRpcServer().getBlockLocations(
        TEST_FILE, 0, 1);
    assertEquals("Standby should have registered that the block has replicas again",
        1, locs.get(0).getLocations().length);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 75, Source: TestStandbyIsHot.java

Example 13: testInvalidateBlock

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test(timeout=60000)
public void testInvalidateBlock() throws Exception {
  Configuration conf = new Configuration();
  HAUtil.setAllowStandbyReads(conf, true);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(3)
      .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);

    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

    Thread.sleep(1000);
    LOG.info("==================================");
    DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
    // Have to force an edit log roll so that the standby catches up
    nn1.getRpcServer().rollEditLog();
    LOG.info("==================================");

    // delete the file
    fs.delete(TEST_FILE_PATH, false);
    BlockManagerTestUtil.computeAllPendingWork(
        nn1.getNamesystem().getBlockManager());

    nn1.getRpcServer().rollEditLog();

    // standby nn doesn't need to invalidate blocks.
    assertEquals(0,
        nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());

    cluster.triggerHeartbeats();
    cluster.triggerBlockReports();

    // standby nn doesn't need to invalidate blocks.
    assertEquals(0,
        nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());

  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 48, Source: TestStandbyBlockManagement.java

Example 14: testFsckMissingReplicas

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Tests that the # of missing block replicas and expected replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMissingReplicas() throws IOException {
  // Desired replication factor
  // Set this higher than NUM_REPLICAS so it's under-replicated
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_REPLICAS, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // Also print the output from the fsck, for ex post facto sanity checks
    System.out.println(result.toString());
    assertEquals((NUM_BLOCKS * REPL_FACTOR) - (NUM_BLOCKS * NUM_REPLICAS),
        res.missingReplicas);
    assertEquals(NUM_BLOCKS * REPL_FACTOR, res.numExpectedReplicas);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 69, Source: TestFsck.java

Example 15: testFsckMisPlacedReplicas

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Tests that the # of mis-placed replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  String [] racks = {"/rack1", "/rack1"};
  String [] hosts = {"host1", "host2"};
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas 
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;
    
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_DN, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // Check the mis-replicated block count.
    assertEquals(NUM_BLOCKS, res.numMisReplicatedBlocks);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 74, Source: TestFsck.java


Note: The examples of the org.apache.hadoop.hdfs.MiniDFSCluster.getNameNode method in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For redistribution and use, refer to the corresponding project's license. Do not reproduce without permission.