当前位置: 首页>>代码示例>>Java>>正文


Java MiniJournalCluster.shutdown方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.qjournal.MiniJournalCluster.shutdown方法的典型用法代码示例。如果您正苦于以下问题：Java MiniJournalCluster.shutdown方法的具体用法？Java MiniJournalCluster.shutdown怎么用？Java MiniJournalCluster.shutdown使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.qjournal.MiniJournalCluster的用法示例。


在下文中一共展示了MiniJournalCluster.shutdown方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: determineMaxIpcNumber

import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入方法依赖的package包/类
/**
 * Runs a fault-free workload against a freshly built mini journal cluster
 * and counts how many RPCs reach each journal node. Because no faults are
 * injected, every logger must observe the same count; that count bounds the
 * fault-injection tests so they can exhaustively sweep each failure point.
 *
 * @return the number of RPCs issued to each logger during the workload
 * @throws Exception if cluster setup or the workload fails
 */
private static long determineMaxIpcNumber() throws Exception {
  Configuration conf = new Configuration();
  MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
  QuorumJournalManager qjm = null;
  long maxIpc;
  try {
    qjm = createInjectableQJM(cluster);
    qjm.format(FAKE_NSINFO);
    doWorkload(cluster, qjm);

    // Collect the per-logger RPC totals; a TreeSet keeps only distinct values.
    SortedSet<Integer> distinctCounts = Sets.newTreeSet();
    for (AsyncLogger logger : qjm.getLoggerSetForTests().getLoggersForTests()) {
      InvocationCountingChannel channel = (InvocationCountingChannel) logger;
      channel.waitForAllPendingCalls();
      distinctCounts.add(channel.getRpcCount());
    }

    // With no injected failures all loggers must have received an identical
    // number of RPCs, so the set collapses to exactly one distinct value.
    assertEquals(1, distinctCounts.size());

    maxIpc = distinctCounts.first();
    LOG.info("Max IPC count = " + maxIpc);
  } finally {
    IOUtils.closeStream(qjm);
    cluster.shutdown();
  }
  return maxIpc;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:36,代码来源:TestQJMWithFaults.java

示例2: determineMaxIpcNumber

import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入方法依赖的package包/类
/**
 * Runs a fault-free workload against a mini journal cluster and counts the
 * RPCs delivered to each journal node. Since nothing fails, every logger
 * must see the same total; that total bounds the fault-injection test cases
 * so they can exhaustively explore the space of potential failures.
 *
 * @return the per-logger RPC count observed during the clean run
 * @throws Exception if cluster startup or the workload fails
 */
private static long determineMaxIpcNumber() throws Exception {
  Configuration conf = new Configuration();
  MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
  cluster.waitActive();
  QuorumJournalManager qjm = null;
  long rpcTotal;
  try {
    qjm = createInjectableQJM(cluster);
    qjm.format(FAKE_NSINFO);
    doWorkload(cluster, qjm);

    // Gather each channel's RPC count; the sorted set deduplicates them.
    SortedSet<Integer> observedCounts = Sets.newTreeSet();
    for (AsyncLogger logger : qjm.getLoggerSetForTests().getLoggersForTests()) {
      InvocationCountingChannel countingChannel =
          (InvocationCountingChannel) logger;
      countingChannel.waitForAllPendingCalls();
      observedCounts.add(countingChannel.getRpcCount());
    }

    // No failures were injected, so all loggers must agree on one count.
    assertEquals(1, observedCounts.size());

    rpcTotal = observedCounts.first();
    LOG.info("Max IPC count = " + rpcTotal);
  } finally {
    IOUtils.closeStream(qjm);
    cluster.shutdown();
  }
  return rpcTotal;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:37,代码来源:TestQJMWithFaults.java

示例3: determineMaxIpcNumber

import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入方法依赖的package包/类
/**
 * Executes the standard workload on a new mini journal cluster with no
 * faults injected and records how many RPCs each journal node receives.
 * All loggers must report the same count in a clean run; that value bounds
 * the exhaustive fault-injection sweeps performed by the other tests.
 *
 * @return the RPC count common to every logger for the fault-free workload
 * @throws Exception if the cluster, journal transition, or workload fails
 */
private static long determineMaxIpcNumber() throws Exception {
  Configuration conf = new Configuration();
  MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf).build();
  QuorumJournalManager qjm = null;
  long commonCount;
  try {
    qjm = createInjectableQJM(cluster);
    qjm.transitionJournal(FAKE_NSINFO, Transition.FORMAT,
        StartupOption.REGULAR);
    doWorkload(cluster, qjm);

    // Record each logger's RPC total; duplicates collapse in the TreeSet.
    SortedSet<Integer> uniqueCounts = Sets.newTreeSet();
    for (AsyncLogger logger : qjm.getLoggerSetForTests().getLoggersForTests()) {
      InvocationCountingChannel countingCh = (InvocationCountingChannel) logger;
      countingCh.waitForAllPendingCalls();
      uniqueCounts.add(countingCh.getRpcCount());
    }

    // Since no faults were injected, every logger must have sent the same
    // number of RPCs — the set of distinct counts must be a singleton.
    assertEquals(1, uniqueCounts.size());

    commonCount = uniqueCounts.first();
    LOG.info("Max IPC count = " + commonCount);
  } finally {
    IOUtils.closeStream(qjm);
    cluster.shutdown();
  }
  return commonCount;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:37,代码来源:TestQJMWithFaults.java

示例4: testRollbackWithQJM

import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入方法依赖的package包/类
/**
 * Verifies that a rolling-upgrade rollback restores the namespace when the
 * NameNode writes its edits to a quorum journal: a directory created before
 * the upgrade marker survives the rollback, one created after it does not,
 * and every JournalNode's storage ends up with the expected segment layout.
 *
 * @throws Exception if cluster setup or any filesystem operation fails
 */
@Test
public void testRollbackWithQJM() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniJournalCluster mjc = null;
  MiniDFSCluster cluster = null;
  final Path foo = new Path("/foo");
  final Path bar = new Path("/bar");

  try {
    mjc = new MiniJournalCluster.Builder(conf).numJournalNodes(
        NUM_JOURNAL_NODES).build();
    // Point the NameNode's edits dir at the quorum journal URI.
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc
        .getQuorumJournalURI(JOURNAL_ID).toString());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();

    DistributedFileSystem dfs = cluster.getFileSystem();
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    dfs.mkdirs(foo);

    // start rolling upgrade
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    Assert.assertEquals(0,
        dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    // create new directory
    dfs.mkdirs(bar);
    dfs.close();

    // rollback
    cluster.restartNameNode("-rollingUpgrade", "rollback");
    // make sure /foo is still there, but /bar is not
    dfs = cluster.getFileSystem();
    Assert.assertTrue(dfs.exists(foo));
    Assert.assertFalse(dfs.exists(bar));

    // check storage in JNs
    for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
      // BUGFIX: use the loop index so each JournalNode's storage directory
      // is checked; the original passed a hard-coded 0 and verified only
      // the first node NUM_JOURNAL_NODES times.
      File dir = mjc.getCurrentDir(i, JOURNAL_ID);
      // segments:(startSegment, mkdir, endSegment), (startSegment, upgrade
      // marker, mkdir, endSegment)
      checkJNStorage(dir, 4, 7);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (mjc != null) {
      mjc.shutdown();
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:53,代码来源:TestRollingUpgradeRollback.java

示例5: testRecoverAfterDoubleFailures

import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入方法依赖的package包/类
/**
 * Sets up two of the nodes to each drop a single RPC, at all
 * possible combinations of RPCs. This may result in the
 * active writer failing to write. After this point, a new writer
 * should be able to recover and continue writing without
 * data loss.
 *
 * @throws Exception if recovery fails for any injected failure pair
 */
@Test
public void testRecoverAfterDoubleFailures() throws Exception {
  final long MAX_IPC_NUMBER = determineMaxIpcNumber();

  for (int failA = 1; failA <= MAX_IPC_NUMBER; failA++) {
    for (int failB = 1; failB <= MAX_IPC_NUMBER; failB++) {
      String injectionStr = "(" + failA + ", " + failB + ")";

      LOG.info("\n\n-------------------------------------------\n" +
          "Beginning test, failing at " + injectionStr + "\n" +
          "-------------------------------------------\n\n");

      MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
        .build();
      QuorumJournalManager qjm = null;
      try {
        qjm = createInjectableQJM(cluster);
        qjm.format(FAKE_NSINFO);
        List<AsyncLogger> loggers = qjm.getLoggerSetForTests().getLoggersForTests();
        // Drop one RPC on each of two loggers — a majority of the three.
        failIpcNumber(loggers.get(0), failA);
        failIpcNumber(loggers.get(1), failB);
        int lastAckedTxn = doWorkload(cluster, qjm);

        if (lastAckedTxn < 6) {
          LOG.info("Failed after injecting failures at " + injectionStr +
              ". This is expected since we injected a failure in the " +
              "majority.");
        }
        qjm.close();
        qjm = null;

        // Now should be able to recover
        qjm = createInjectableQJM(cluster);
        long lastRecoveredTxn = QJMTestUtil.recoverAndReturnLastTxn(qjm);
        // Recovery must not lose any transaction the writer acked.
        assertTrue(lastRecoveredTxn >= lastAckedTxn);

        writeSegment(cluster, qjm, lastRecoveredTxn + 1, 3, true);
      } catch (Throwable t) {
        // Test failure! Rethrow with the test setup info so it can be
        // easily triaged.
        throw new RuntimeException("Test failed with injection: " + injectionStr,
              t);
      } finally {
        // FIX: close the client-side journal manager before tearing down the
        // cluster it talks to, matching the cleanup order used in
        // determineMaxIpcNumber(). (The dead `= null` stores that followed
        // were removed; both locals are redeclared each iteration.)
        IOUtils.closeStream(qjm);
        cluster.shutdown();
      }
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:59,代码来源:TestQJMWithFaults.java

示例6: testRollbackWithQJM

import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入方法依赖的package包/类
/**
 * Verifies rolling-upgrade rollback with a quorum-journal-backed NameNode:
 * data created before the upgrade marker (/foo) survives the rollback, data
 * created after it (/bar) is discarded, and each JournalNode's storage
 * contains the expected number of edit segments.
 *
 * @throws Exception if cluster setup or any filesystem operation fails
 */
@Test
public void testRollbackWithQJM() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniJournalCluster mjc = null;
  MiniDFSCluster cluster = null;
  final Path foo = new Path("/foo");
  final Path bar = new Path("/bar");

  try {
    mjc = new MiniJournalCluster.Builder(conf).numJournalNodes(
        NUM_JOURNAL_NODES).build();
    mjc.waitActive();
    // Point the NameNode's edits dir at the quorum journal URI.
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc
        .getQuorumJournalURI(JOURNAL_ID).toString());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();

    DistributedFileSystem dfs = cluster.getFileSystem();
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    dfs.mkdirs(foo);

    // start rolling upgrade
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    Assert.assertEquals(0,
        dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    // create new directory
    dfs.mkdirs(bar);
    dfs.close();

    // rollback
    cluster.restartNameNode("-rollingUpgrade", "rollback");
    // make sure /foo is still there, but /bar is not
    dfs = cluster.getFileSystem();
    Assert.assertTrue(dfs.exists(foo));
    Assert.assertFalse(dfs.exists(bar));

    // check storage in JNs
    for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
      // BUGFIX: use the loop index so each JournalNode's storage directory
      // is checked; the original passed a hard-coded 0 and verified only
      // the first node NUM_JOURNAL_NODES times.
      File dir = mjc.getCurrentDir(i, JOURNAL_ID);
      // segments:(startSegment, mkdir, endSegment), (startSegment, upgrade
      // marker, mkdir, endSegment)
      checkJNStorage(dir, 4, 7);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (mjc != null) {
      mjc.shutdown();
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:54,代码来源:TestRollingUpgradeRollback.java

示例7: testRecoverAfterDoubleFailures

import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入方法依赖的package包/类
/**
 * Sets up two of the nodes to each drop a single RPC, at all
 * possible combinations of RPCs. This may result in the
 * active writer failing to write. After this point, a new writer
 * should be able to recover and continue writing without
 * data loss.
 *
 * @throws Exception if recovery fails for any injected failure pair
 */
@Test
public void testRecoverAfterDoubleFailures() throws Exception {
  final long MAX_IPC_NUMBER = determineMaxIpcNumber();

  for (int failA = 1; failA <= MAX_IPC_NUMBER; failA++) {
    for (int failB = 1; failB <= MAX_IPC_NUMBER; failB++) {
      String injectionStr = "(" + failA + ", " + failB + ")";

      LOG.info("\n\n-------------------------------------------\n" +
          "Beginning test, failing at " + injectionStr + "\n" +
          "-------------------------------------------\n\n");

      MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
        .build();
      cluster.waitActive();
      QuorumJournalManager qjm = null;
      try {
        qjm = createInjectableQJM(cluster);
        qjm.format(FAKE_NSINFO);
        List<AsyncLogger> loggers = qjm.getLoggerSetForTests().getLoggersForTests();
        // Drop one RPC on each of two loggers — a majority of the three.
        failIpcNumber(loggers.get(0), failA);
        failIpcNumber(loggers.get(1), failB);
        int lastAckedTxn = doWorkload(cluster, qjm);

        if (lastAckedTxn < 6) {
          LOG.info("Failed after injecting failures at " + injectionStr +
              ". This is expected since we injected a failure in the " +
              "majority.");
        }
        qjm.close();
        qjm = null;

        // Now should be able to recover
        qjm = createInjectableQJM(cluster);
        long lastRecoveredTxn = QJMTestUtil.recoverAndReturnLastTxn(qjm);
        // Recovery must not lose any transaction the writer acked.
        assertTrue(lastRecoveredTxn >= lastAckedTxn);

        writeSegment(cluster, qjm, lastRecoveredTxn + 1, 3, true);
      } catch (Throwable t) {
        // Test failure! Rethrow with the test setup info so it can be
        // easily triaged.
        throw new RuntimeException("Test failed with injection: " + injectionStr,
              t);
      } finally {
        // FIX: close the client-side journal manager before tearing down the
        // cluster it talks to, matching the cleanup order used in
        // determineMaxIpcNumber(). (The dead `= null` stores that followed
        // were removed; both locals are redeclared each iteration.)
        IOUtils.closeStream(qjm);
        cluster.shutdown();
      }
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:60,代码来源:TestQJMWithFaults.java

示例8: testRecoverAfterDoubleFailures

import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入方法依赖的package包/类
/**
 * Sets up two of the nodes to each drop a single RPC, at all
 * possible combinations of RPCs. This may result in the
 * active writer failing to write. After this point, a new writer
 * should be able to recover and continue writing without
 * data loss.
 *
 * @throws Exception if recovery fails for any injected failure pair
 */
@Test
public void testRecoverAfterDoubleFailures() throws Exception {
  final long MAX_IPC_NUMBER = determineMaxIpcNumber();

  for (int failA = 1; failA <= MAX_IPC_NUMBER; failA++) {
    for (int failB = 1; failB <= MAX_IPC_NUMBER; failB++) {
      String injectionStr = "(" + failA + ", " + failB + ")";

      LOG.info("\n\n-------------------------------------------\n" +
          "Beginning test, failing at " + injectionStr + "\n" +
          "-------------------------------------------\n\n");

      MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
        .build();
      QuorumJournalManager qjm = null;
      try {
        qjm = createInjectableQJM(cluster);
        qjm.transitionJournal(FAKE_NSINFO, Transition.FORMAT,
            StartupOption.REGULAR);
        List<AsyncLogger> loggers = qjm.getLoggerSetForTests().getLoggersForTests();
        // Drop one RPC on each of two loggers — a majority of the three.
        failIpcNumber(loggers.get(0), failA);
        failIpcNumber(loggers.get(1), failB);
        int lastAckedTxn = doWorkload(cluster, qjm);

        if (lastAckedTxn < 6) {
          LOG.info("Failed after injecting failures at " + injectionStr +
              ". This is expected since we injected a failure in the " +
              "majority.");
        }
        qjm.close();
        qjm = null;

        // Now should be able to recover
        qjm = createInjectableQJM(cluster);
        long lastRecoveredTxn = QJMTestUtil.recoverAndReturnLastTxn(qjm);
        // Recovery must not lose any transaction the writer acked.
        assertTrue(lastRecoveredTxn >= lastAckedTxn);

        writeSegment(cluster, qjm, lastRecoveredTxn + 1, 3, true);
      } catch (Throwable t) {
        // Test failure! Rethrow with the test setup info so it can be
        // easily triaged.
        throw new RuntimeException("Test failed with injection: " + injectionStr,
              t);
      } finally {
        // FIX: close the client-side journal manager before tearing down the
        // cluster it talks to, matching the cleanup order used in
        // determineMaxIpcNumber(). (The dead `= null` stores that followed
        // were removed; both locals are redeclared each iteration.)
        IOUtils.closeStream(qjm);
        cluster.shutdown();
      }
    }
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:60,代码来源:TestQJMWithFaults.java


注:本文中的org.apache.hadoop.hdfs.qjournal.MiniJournalCluster.shutdown方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。