

Java MiniDFSCluster.getNameDirs Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.getNameDirs. If you have been wondering what MiniDFSCluster.getNameDirs does and how to use it, the curated code examples below should help. You can also browse further usage examples of the containing class, org.apache.hadoop.hdfs.MiniDFSCluster.


The following presents 7 code examples of the MiniDFSCluster.getNameDirs method, sorted by popularity by default.
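As a quick orientation before the examples, here is a minimal, self-contained sketch of the method in isolation: it spins up an in-process mini HDFS cluster and prints the NameNode's local storage directories. This is illustrative demo code (the class name GetNameDirsDemo is made up for this page), assuming the hadoop-hdfs test artifacts are on the classpath.

import java.io.File;
import java.net.URI;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GetNameDirsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .build();
    try {
      // getNameDirs(nnIndex) returns the URIs of the given NameNode's
      // local storage directories (dfs.namenode.name.dir).
      Collection<URI> nameDirs = cluster.getNameDirs(0);
      for (URI uri : nameDirs) {
        System.out.println("name dir: " + new File(uri.getPath()));
      }
    } finally {
      cluster.shutdown();
    }
  }
}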

Example 1: testStartingWithUpgradeInProgressSucceeds

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Make sure that an HA NN will start if a previous upgrade was in progress.
 */
@Test
public void testStartingWithUpgradeInProgressSucceeds() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    // Simulate an upgrade having started.
    for (int i = 0; i < 2; i++) {
      for (URI uri : cluster.getNameDirs(i)) {
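        // Storage.STORAGE_TMP_PREVIOUS is the "previous.tmp" directory name;
        // its presence marks an upgrade that was started but not finalized.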
        File prevTmp = new File(new File(uri), Storage.STORAGE_TMP_PREVIOUS);
        LOG.info("creating previous tmp dir: " + prevTmp);
        assertTrue(prevTmp.mkdirs());
      }
    }

    cluster.restartNameNodes();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 29; Source: TestDFSUpgradeWithHA.java

Example 2: checkNnPreviousDirExistence

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
private static void checkNnPreviousDirExistence(MiniDFSCluster cluster,
    int index, boolean shouldExist) {
  Collection<URI> nameDirs = cluster.getNameDirs(index);
  for (URI nnDir : nameDirs) {
    checkPreviousDirExistence(new File(nnDir), shouldExist);
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 8; Source: TestDFSUpgradeWithHA.java
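The helper above delegates to checkPreviousDirExistence, which this page does not reproduce. A minimal sketch of what such a helper looks like, assuming JUnit's assertTrue/assertFalse (an approximation, not the verbatim upstream code):

private static void checkPreviousDirExistence(File rootDir,
    boolean shouldExist) {
  // During an HDFS upgrade, each storage directory gains a "previous"
  // subdirectory that preserves the pre-upgrade state for rollback.
  File previousDir = new File(rootDir, "previous");
  if (shouldExist) {
    assertTrue(previousDir + " should exist", previousDir.exists());
  } else {
    assertFalse(previousDir + " should not exist", previousDir.exists());
  }
}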

Example 3: testNameDirError

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testNameDirError() throws IOException {
  LOG.info("Starting testNameDirError");
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
      .build();
  
  Collection<URI> nameDirs = cluster.getNameDirs(0);
  cluster.shutdown();
  cluster = null;
  
  for (URI nameDirUri : nameDirs) {
    File dir = new File(nameDirUri.getPath());
    
    try {
      // Simulate the mount going read-only
      FileUtil.setWritable(dir, false);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
          .format(false).build();
      fail("NN should have failed to start with " + dir + " set unreadable");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "storage directory does not exist or is not accessible", ioe);
    } finally {
      cleanup(cluster);
      cluster = null;
      FileUtil.setWritable(dir, true);
    }
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 31; Source: TestCheckpoint.java

Example 4: getNameNodeCurrentDirs

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
public static List<File> getNameNodeCurrentDirs(MiniDFSCluster cluster, int nnIdx) {
  List<File> nameDirs = Lists.newArrayList();
  for (URI u : cluster.getNameDirs(nnIdx)) {
    nameDirs.add(new File(u.getPath(), "current"));
  }
  return nameDirs;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 8; Source: FSImageTestUtil.java
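A brief, hypothetical usage sketch for this helper: after a checkpoint, list the files under each NameNode storage "current" directory. Only the FSImageTestUtil call comes from the example above; the rest is plain java.io demo code.

for (File currentDir : FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0)) {
  File[] files = currentDir.listFiles();
  if (files != null) {
    for (File f : files) {
      // Typical entries: VERSION, fsimage_*, edits_*, seen_txid.
      System.out.println(currentDir + " contains " + f.getName());
    }
  }
}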

Example 5: testRollbackWithNfs

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Test rollback with NFS shared dir.
 */
@Test
public void testRollbackWithNfs() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();

    File sharedDir = new File(cluster.getSharedEditsDir(0, 1));
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkPreviousDirExistence(sharedDir, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    cluster.restartNameNode(1);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkPreviousDirExistence(sharedDir, true);
    assertCTimesEqual(cluster);
    
    // Now shut down the cluster and do the rollback.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkPreviousDirExistence(sharedDir, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 74; Source: TestDFSUpgradeWithHA.java

Example 6: testRollbackWithJournalNodes

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testRollbackWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    cluster.restartNameNode(1);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkJnPreviousDirExistence(qjCluster, true);
    assertCTimesEqual(cluster);
    
    // Shut down the NNs, but deliberately leave the JNs up and running.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidBeforeUpgrade < cidAfterRollback);
    // make sure the committedTxnId has been reset correctly after rollback
    assertTrue(cidDuringUpgrade > cidAfterRollback);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkJnPreviousDirExistence(qjCluster, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 82; Source: TestDFSUpgradeWithHA.java

Example 7: testImportCheckpoint

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Test the importCheckpoint startup option. Verifies:
 * 1. if the NN already contains an image, it will not be allowed
 *   to import a checkpoint.
 * 2. if the NN does not contain an image, importing a checkpoint
 *    succeeds and re-saves the image
 */
@Test
public void testImportCheckpoint() throws Exception {
  Configuration conf = new HdfsConfiguration();
  Path testPath = new Path("/testfile");
  SecondaryNameNode snn = null;
  MiniDFSCluster cluster = null;
  Collection<URI> nameDirs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    nameDirs = cluster.getNameDirs(0);
    
    // Make an entry in the namespace, used for verifying checkpoint
    // later.
    cluster.getFileSystem().mkdirs(testPath);
    
    // Take a checkpoint
    snn = startSecondaryNameNode(conf);
    snn.doCheckpoint();
  } finally {
    cleanup(snn);
    cleanup(cluster);
    cluster = null;
  }
  
  LOG.info("Trying to import checkpoint when the NameNode already " +
  		"contains an image. This should fail.");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
        .startupOption(StartupOption.IMPORT).build();
    fail("NameNode did not fail to start when it already contained " +
    		"an image");
  } catch (IOException ioe) {
    // Expected
    GenericTestUtils.assertExceptionContains(
        "NameNode already contains an image", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
  }
  
  LOG.info("Removing NN storage contents");
  for(URI uri : nameDirs) {
    File dir = new File(uri.getPath());
    LOG.info("Cleaning " + dir);
    removeAndRecreateDir(dir);
  }
  
  LOG.info("Trying to import checkpoint");
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0)
        .startupOption(StartupOption.IMPORT).build();
    
    assertTrue("Path from checkpoint should exist after import",
        cluster.getFileSystem().exists(testPath));

    // Make sure that the image got saved on import
    FSImageTestUtil.assertNNHasCheckpoints(cluster, Ints.asList(3));
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 70; Source: TestCheckpoint.java


Note: The org.apache.hadoop.hdfs.MiniDFSCluster.getNameDirs method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors. Refer to each project's license before redistributing or using the code, and do not republish without permission.