

Java MiniDFSCluster.getNameNodeRpc Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.getNameNodeRpc. If you are wondering what MiniDFSCluster.getNameNodeRpc does, how to call it, or where to find working examples, the curated snippets below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.MiniDFSCluster.


The sections below present 9 code examples of the MiniDFSCluster.getNameNodeRpc method, ordered by popularity.
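Before the examples, here is a minimal, self-contained sketch of the pattern they all share: start a MiniDFSCluster, obtain the NameNode's RPC interface (NamenodeProtocols) via getNameNodeRpc(), issue an RPC against it, and shut the cluster down. The builder options and the getDatanodeReport call are illustrative choices, not taken from any specific example below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

public class GetNameNodeRpcSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
      // Start a one-DataNode mini cluster (numDataNodes is illustrative).
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();

      // getNameNodeRpc() returns the NameNode's RPC interface
      // (NamenodeProtocols), through which tests drive the NameNode directly.
      NamenodeProtocols nn = cluster.getNameNodeRpc();

      // Example RPC: ask for the live DataNode report.
      DatanodeInfo[] live = nn.getDatanodeReport(DatanodeReportType.LIVE);
      System.out.println("live datanodes: " + live.length);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}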

Example 1: testSecondaryHasVeryOutOfDateImage

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Test case where the secondary does a checkpoint, then stops for a while.
 * In the meantime, the NN saves its image several times, so that the
 * logs that connect the 2NN's old checkpoint to the current txid
 * get archived. Then, the 2NN tries to checkpoint again.
 */
@Test
public void testSecondaryHasVeryOutOfDateImage() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  Configuration conf = new HdfsConfiguration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN saves namespace 3 times
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    for (int i = 0; i < 3; i++) {
      nn.saveNamespace();
    }
    nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    
    // Now the secondary tries to checkpoint again with its
    // old image in memory.
    secondary.doCheckpoint();
    
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer: naver | Project: hadoop | Lines: 41 | Source: TestCheckpoint.java

Example 2: testInvalidNetworkTopologiesNotCachedInHdfs

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test(timeout=180000)
public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    // bad rack topology
    String racks[] = { "/a/b", "/c" };
    String hosts[] = { "foo1.example.com", "foo2.example.com" };
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).
        racks(racks).hosts(hosts).build();
    cluster.waitActive();
    
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    Assert.assertNotNull(nn);
    
    // Wait for one DataNode to register.
    // The other DataNode will not be able to register because of the rack mismatch.
    DatanodeInfo[] info;
    while (true) {
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
      Assert.assertFalse(info.length == 2);
      if (info.length == 1) {
        break;
      }
      Thread.sleep(1000);
    }
    // Set the network topology of the other node to match the network
    // topology of the node that came up.
    int validIdx = info[0].getHostName().equals(hosts[0]) ? 0 : 1;
    int invalidIdx = validIdx == 1 ? 0 : 1;
    StaticMapping.addNodeToRack(hosts[invalidIdx], racks[validIdx]);
    LOG.info("datanode " + validIdx + " came up with network location " + 
      info[0].getNetworkLocation());

    // Restart the DN with the invalid topology and wait for it to register.
    cluster.restartDataNode(invalidIdx);
    Thread.sleep(5000);
    while (true) {
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
      if (info.length == 2) {
        break;
      }
      if (info.length == 0) {
        LOG.info("got no valid DNs");
      } else if (info.length == 1) {
        LOG.info("got one valid DN: " + info[0].getHostName() +
            " (at " + info[0].getNetworkLocation() + ")");
      }
      Thread.sleep(1000);
    }
    Assert.assertEquals(info[0].getNetworkLocation(),
                        info[1].getNetworkLocation());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 60 | Source: TestNetworkTopology.java

Example 3: getRpcServerAddress

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
private static String getRpcServerAddress(MiniDFSCluster cluster) {
  NameNodeRpcServer rpcServer = (NameNodeRpcServer) cluster.getNameNodeRpc();
  return rpcServer.getClientRpcServer().getListenerAddress().getAddress().toString();
}
 
Developer: naver | Project: hadoop | Lines: 5 | Source: TestNameNodeRespectsBindHostKeys.java

Example 4: getServiceRpcServerAddress

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
private static String getServiceRpcServerAddress(MiniDFSCluster cluster) {
  NameNodeRpcServer rpcServer = (NameNodeRpcServer) cluster.getNameNodeRpc();
  return rpcServer.getServiceRpcServer().getListenerAddress().getAddress().toString();
}
 
Developer: naver | Project: hadoop | Lines: 5 | Source: TestNameNodeRespectsBindHostKeys.java

Example 5: getLifelineRpcServerAddress

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
private static String getLifelineRpcServerAddress(MiniDFSCluster cluster) {
  NameNodeRpcServer rpcServer = (NameNodeRpcServer) cluster.getNameNodeRpc();
  return rpcServer.getLifelineRpcServer().getListenerAddress().getAddress()
      .toString();
}
 
Developer: naver | Project: hadoop | Lines: 6 | Source: TestNameNodeRespectsBindHostKeys.java
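For context on Examples 3 through 5: in TestNameNodeRespectsBindHostKeys, helpers like these are compared against a wildcard address after the corresponding *-bind-host key is set. The sketch below shows that usage for the client RPC server; the key name "dfs.namenode.rpc-bind-host" and the expected "/0.0.0.0" rendering are assumptions based on hdfs-default.xml and on how InetAddress.toString() formats an address, not taken from this article's examples.

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;

public class BindHostUsageSketch {
  // Assumed key name; see hdfs-default.xml for the authoritative value.
  private static final String RPC_BIND_HOST_KEY = "dfs.namenode.rpc-bind-host";
  private static final String WILDCARD = "0.0.0.0";

  public void checkRpcServerBindsWildcard() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(RPC_BIND_HOST_KEY, WILDCARD);
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();
      // Same extraction as Example 3's getRpcServerAddress helper.
      NameNodeRpcServer rpcServer = (NameNodeRpcServer) cluster.getNameNodeRpc();
      String address = rpcServer.getClientRpcServer().getListenerAddress()
          .getAddress().toString();
      // InetAddress.toString() renders the wildcard as "/0.0.0.0" (assumed).
      assertEquals("/" + WILDCARD, address);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}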

Example 6: testFsckListCorruptFilesBlocks

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/** Check that the -list-corruptfileblocks option of the fsck command works properly. */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs,"/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Developer: naver | Project: hadoop | Lines: 68 | Source: TestFsck.java

Example 7: testCheckpointWithFailedStorageDir

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);
    
    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);

    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();
    
    GenericTestUtils.assertExists(
        new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
    
    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer: naver | Project: hadoop | Lines: 59 | Source: TestCheckpoint.java

Example 8: testCheckpointWithSeparateDirsAfterNameFails

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }
    
    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer: naver | Project: hadoop | Lines: 73 | Source: TestCheckpoint.java

Example 9: testSaveNamespace

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Tests saving fs image while transactions are ongoing.
 */
@Test
public void testSaveNamespace() throws Exception {
  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;

  AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final NamenodeProtocols nn = cluster.getNameNodeRpc();

    FSImage fsimage = namesystem.getFSImage();
    FSEditLog editLog = fsimage.getEditLog();

    startTransactionWorkers(nn, caughtErr);

    for (int i = 0; i < NUM_SAVE_IMAGE && caughtErr.get() == null; i++) {
      try {
        Thread.sleep(20);
      } catch (InterruptedException ignored) {}


      LOG.info("Save " + i + ": entering safe mode");
      namesystem.enterSafeMode(false);

      // Verify edit logs before the save
      // They should start with the first edit after the checkpoint
      long logStartTxId = fsimage.getStorage().getMostRecentCheckpointTxId() + 1; 
      verifyEditLogs(namesystem, fsimage,
          NNStorage.getInProgressEditsFileName(logStartTxId),
          logStartTxId);


      LOG.info("Save " + i + ": saving namespace");
      namesystem.saveNamespace();
      LOG.info("Save " + i + ": leaving safemode");

      long savedImageTxId = fsimage.getStorage().getMostRecentCheckpointTxId();
      
      // Verify that edit logs post save got finalized and aren't corrupt
      verifyEditLogs(namesystem, fsimage,
          NNStorage.getFinalizedEditsFileName(logStartTxId, savedImageTxId),
          logStartTxId);
      
      // The checkpoint id should be 1 less than the last written ID, since
      // the log roll writes the "BEGIN" transaction to the new log.
      assertEquals(fsimage.getStorage().getMostRecentCheckpointTxId(),
                   editLog.getLastWrittenTxId() - 1);

      namesystem.leaveSafeMode();
      LOG.info("Save " + i + ": complete");
    }
  } finally {
    stopTransactionWorkers();
    if (caughtErr.get() != null) {
      throw new RuntimeException(caughtErr.get());
    }
    if(fileSys != null) fileSys.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 69 | Source: TestEditLogRace.java


Note: The org.apache.hadoop.hdfs.MiniDFSCluster.getNameNodeRpc examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright of the source code belongs to the original authors. Consult each project's License before distributing or using the code; do not reproduce without permission.