

Java MiniQJMHACluster.Builder Code Examples

This article collects typical usage examples of MiniQJMHACluster.Builder from the Java class org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster. If you are wondering what MiniQJMHACluster.Builder does, how to call it, or where to find real-world usages, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.


The following sections show 10 code examples of MiniQJMHACluster.Builder, sorted by popularity by default.
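Before the individual examples, here is a minimal sketch of the pattern that recurs in all of the snippets below: construct a MiniQJMHACluster.Builder around a Configuration, tune the nested MiniDFSCluster.Builder via getDfsBuilder(), build the QJM-backed HA cluster, work against the embedded MiniDFSCluster, and shut everything down. The wrapper class name, the main method, and the try/finally structure are illustrative assumptions made for this sketch; only the Builder calls themselves are taken from the examples that follow, where the same pattern appears inside JUnit @Before and @Test methods with additional HA-specific configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder;

public class MiniQJMHAClusterBuilderSketch { // hypothetical wrapper class, for illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Create the builder for a two-NameNode HA cluster backed by a quorum of JournalNodes.
    Builder builder = new MiniQJMHACluster.Builder(conf);
    // getDfsBuilder() exposes the underlying MiniDFSCluster.Builder for DFS-level options,
    // e.g. running without DataNodes, as most of the tests below do.
    builder.getDfsBuilder().numDataNodes(0);

    MiniQJMHACluster qjmCluster = builder.build();
    MiniDFSCluster dfsCluster = qjmCluster.getDfsCluster();
    try {
      dfsCluster.waitActive();
      dfsCluster.transitionToActive(0); // make NN0 the active NameNode
      // ... exercise the HA cluster here ...
    } finally {
      qjmCluster.shutdown(); // stops the NameNodes and the JournalNodes
    }
  }
}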

Example 1: setUpCluster

import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; // import the package/class this method depends on
@Before
public void setUpCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 10);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  
  if (clusterType == TestType.SHARED_DIR_HA) {
    MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology(10000);
    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .checkExitOnShutdown(false)
      .build();
  } else {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder().numDataNodes(0).checkExitOnShutdown(false);
    miniQjmHaCluster = builder.build();
    cluster = miniQjmHaCluster.getDfsCluster();
  }
  cluster.waitActive();
  
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  
  cluster.transitionToActive(0);
  fs = HATestUtil.configureFailoverFs(cluster, conf);
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestFailureToReadEdits.java

Example 2: setUpCluster

import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; // import the package/class this method depends on
@Before
public void setUpCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 10);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  
  if (clusterType == TestType.SHARED_DIR_HA) {
    MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology();
    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .checkExitOnShutdown(false)
      .build();
  } else {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder().numDataNodes(0).checkExitOnShutdown(false);
    miniQjmHaCluster = builder.build();
    cluster = miniQjmHaCluster.getDfsCluster();
  }
  cluster.waitActive();
  
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  
  cluster.transitionToActive(0);
  fs = HATestUtil.configureFailoverFs(cluster, conf);
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 31, Source: TestFailureToReadEdits.java

Example 3: testUpgradeWithJournalNodes

import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; // import the package/class this method depends on
/**
 * Make sure that an HA NN can successfully upgrade when configured using
 * JournalNodes.
 */
@Test
public void testUpgradeWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();
    
    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    // get the value of the committedTxnId in journal nodes
    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);

    assertTrue(cidBeforeUpgrade <= getCommittedTxnIdValue(qjCluster));
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    // Restart NN0 without the -upgrade flag, to make sure that works.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0, false);
    
    // Make sure we can still do FS ops after upgrading.
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));

    assertTrue(getCommittedTxnIdValue(qjCluster) > cidBeforeUpgrade);
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    // Now restart NN1 and make sure that we can do ops against that as well.
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));
    
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 79, Source: TestDFSUpgradeWithHA.java

Example 4: testFinalizeWithJournalNodes

import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; // import the package/class this method depends on
@Test
public void testFinalizeWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();
    
    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    assertTrue(cidBeforeUpgrade <= getCommittedTxnIdValue(qjCluster));
    
    assertTrue(fs.mkdirs(new Path("/foo2")));

    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    cluster.restartNameNode(1);

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    runFinalizeCommand(cluster);

    assertEquals(cidDuringUpgrade, getCommittedTxnIdValue(qjCluster));
    checkClusterPreviousDirExistence(cluster, false);
    checkJnPreviousDirExistence(qjCluster, false);
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 65, Source: TestDFSUpgradeWithHA.java

Example 5: testFinalizeFromSecondNameNodeWithJournalNodes

import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; // import the package/class this method depends on
/**
 * Make sure that even if the NN which initiated the upgrade is in the standby
 * state that we're allowed to finalize.
 */
@Test
public void testFinalizeFromSecondNameNodeWithJournalNodes()
    throws IOException, URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();
    
    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    cluster.restartNameNode(1);
    
    // Make the second NN (not the one that initiated the upgrade) active when
    // the finalize command is run.
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    
    runFinalizeCommand(cluster);
    
    checkClusterPreviousDirExistence(cluster, false);
    checkJnPreviousDirExistence(qjCluster, false);
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 65, Source: TestDFSUpgradeWithHA.java

Example 6: testRollbackWithJournalNodes

import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; // import the package/class this method depends on
@Test
public void testRollbackWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    cluster.restartNameNode(1);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, true);
    checkJnPreviousDirExistence(qjCluster, true);
    assertCTimesEqual(cluster);
    
    // Shut down the NNs, but deliberately leave the JNs up and running.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidBeforeUpgrade < cidAfterRollback);
    // make sure the committedTxnId has been reset correctly after rollback
    assertTrue(cidDuringUpgrade > cidAfterRollback);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkJnPreviousDirExistence(qjCluster, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 82, Source: TestDFSUpgradeWithHA.java

Example 7: testRollbackWithJournalNodes

import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; // import the package/class this method depends on
@Test
public void testRollbackWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));

    final long cidBeforeUpgrade = getCommittedTxnIdValue(qjCluster);

    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));

    final long cidDuringUpgrade = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidDuringUpgrade > cidBeforeUpgrade);

    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    cluster.restartNameNode(1);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    assertCTimesEqual(cluster);
    
    // Shut down the NNs, but deliberately leave the JNs up and running.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    final long cidAfterRollback = getCommittedTxnIdValue(qjCluster);
    assertTrue(cidBeforeUpgrade < cidAfterRollback);
    // make sure the committedTxnId has been reset correctly after rollback
    assertTrue(cidDuringUpgrade > cidAfterRollback);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkJnPreviousDirExistence(qjCluster, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
 
Developer: yncxcw, Project: FlexMap, Lines: 82, Source: TestDFSUpgradeWithHA.java

Example 8: testUpgradeWithJournalNodes

import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; // import the package/class this method depends on
/**
 * Make sure that an HA NN can successfully upgrade when configured using
 * JournalNodes.
 */
@Test
public void testUpgradeWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();
    
    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    // Restart NN0 without the -upgrade flag, to make sure that works.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0, false);
    
    // Make sure we can still do FS ops after upgrading.
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    // Now restart NN1 and make sure that we can do ops against that as well.
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));
    
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 72, Source: TestDFSUpgradeWithHA.java

Example 9: testFinalizeWithJournalNodes

import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; // import the package/class this method depends on
@Test
public void testFinalizeWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();
    
    // No upgrade is in progress at the moment.
    checkJnPreviousDirExistence(qjCluster, false);
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    cluster.restartNameNode(1);
    
    runFinalizeCommand(cluster);
    
    checkClusterPreviousDirExistence(cluster, false);
    checkJnPreviousDirExistence(qjCluster, false);
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 58, Source: TestDFSUpgradeWithHA.java

Example 10: testRollbackWithJournalNodes

import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster; // import the package/class this method depends on
@Test
public void testRollbackWithJournalNodes() throws IOException,
    URISyntaxException {
  MiniQJMHACluster qjCluster = null;
  FileSystem fs = null;
  try {
    Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder()
        .numDataNodes(0);
    qjCluster = builder.build();

    MiniDFSCluster cluster = qjCluster.getDfsCluster();
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    cluster.restartNameNode(1);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkJnPreviousDirExistence(qjCluster, true);
    assertCTimesEqual(cluster);
    
    // Shut down the NNs, but deliberately leave the JNs up and running.
    Collection<URI> nn1NameDirs = cluster.getNameDirs(0);
    cluster.shutdown();

    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf, false);

    // The rollback operation should have rolled back the first NN's local
    // dirs, and the shared dir, but not the other NN's dirs. Those have to be
    // done by bootstrapping the standby.
    checkNnPreviousDirExistence(cluster, 0, false);
    checkJnPreviousDirExistence(qjCluster, false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 72, Source: TestDFSUpgradeWithHA.java


Note: The org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster.Builder examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please follow the corresponding project's License when redistributing or using the code; do not reproduce this article without permission.