当前位置: 首页>>代码示例>>Java>>正文


Java QJMTestUtil.recoverAndReturnLastTxn方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.qjournal.QJMTestUtil.recoverAndReturnLastTxn方法的典型用法代码示例。如果您正苦于以下问题：Java QJMTestUtil.recoverAndReturnLastTxn方法的具体用法？Java QJMTestUtil.recoverAndReturnLastTxn怎么用？Java QJMTestUtil.recoverAndReturnLastTxn使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.qjournal.QJMTestUtil的用法示例。


在下文中一共展示了QJMTestUtil.recoverAndReturnLastTxn方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: tryRecoveryExpectingFailure

import org.apache.hadoop.hdfs.qjournal.QJMTestUtil; //导入方法依赖的package包/类
/**
 * Run log recovery on {@code qjm} and require that it aborts with one of
 * the injected faults set up by the calling test. The QJM is closed
 * afterwards regardless of outcome.
 */
private void tryRecoveryExpectingFailure() throws IOException {
  try {
    QJMTestUtil.recoverAndReturnLastTxn(qjm);
    fail("Expected to fail recovery");
  } catch (QuorumException expected) {
    // The fault injectors used by these tests all throw with "Injected"
    // in the message, so anything else is a real (unexpected) failure.
    GenericTestUtils.assertExceptionContains("Injected", expected);
  } finally {
    qjm.close();
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:12,代码来源:TestQuorumJournalManager.java

示例2: testMissFinalizeAndNextStart

import org.apache.hadoop.hdfs.qjournal.QJMTestUtil; //导入方法依赖的package包/类
/**
 * Test the case where one of the loggers misses a finalizeLogSegment()
 * call, and then misses the next startLogSegment() call before coming
 * back to life.
 *
 * Previously, this caused it to keep on writing to the old log segment,
 * such that one logger had eg edits_1-10 while the others had edits_1-5 and
 * edits_6-10. This caused recovery to fail in certain cases.
 */
@Test
public void testMissFinalizeAndNextStart() throws Exception {
  
  // Logger 0: miss finalize(1-3) and start(4)
  futureThrows(new IOException("injected")).when(spies.get(0))
    .finalizeLogSegment(Mockito.eq(1L), Mockito.eq(3L));
  futureThrows(new IOException("injected")).when(spies.get(0))
    .startLogSegment(Mockito.eq(4L));
  
  // Logger 1: fail at txn id 4
  failLoggerAtTxn(spies.get(1), 4L);
  
  // Write and finalize segment 1-3; logger 0 silently misses the finalize
  // (and the subsequent start) because of the stubs above.
  writeSegment(cluster, qjm, 1, 3, true);
  EditLogOutputStream stm = qjm.startLogSegment(4);
  try {
    writeTxns(stm, 4, 1);
    fail("Did not fail to write");
  } catch (QuorumException qe) {
    // Should fail, because logger 1 had an injected fault and
    // logger 0 should detect writer out of sync
    GenericTestUtils.assertExceptionContains("Writer out of sync",
        qe);
  } finally {
    stm.abort();
    qjm.close();
  }
  
  // State:
  // Logger 0: 1-3 in-progress (since it missed finalize)
  // Logger 1: 1-3 finalized
  // Logger 2: 1-3 finalized, 4 in-progress with one txn
  
  // Shut down logger 2 so it doesn't participate in recovery
  cluster.getJournalNode(2).stopAndJoin(0);
  
  // Recovery across loggers 0 and 1 must agree that the segment ends at
  // txid 3, despite logger 0's segment still being in-progress.
  qjm = createSpyingQJM();
  long recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
  assertEquals(3L, recovered);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:49,代码来源:TestQuorumJournalManager.java

示例3: testRecoverAfterDoubleFailures

import org.apache.hadoop.hdfs.qjournal.QJMTestUtil; //导入方法依赖的package包/类
/**
 * Exhaustively injects one dropped RPC into each of two loggers, at every
 * possible (failA, failB) pair of IPC indices. This may result in the
 * active writer failing to write. Whatever transactions the writer did get
 * acked, a fresh writer must afterwards be able to recover at least that
 * far and continue writing without data loss.
 */
@Test
public void testRecoverAfterDoubleFailures() throws Exception {
  final long MAX_IPC_NUMBER = determineMaxIpcNumber();

  for (int firstFault = 1; firstFault <= MAX_IPC_NUMBER; firstFault++) {
    for (int secondFault = 1; secondFault <= MAX_IPC_NUMBER; secondFault++) {
      // Identifies this fault combination in the logs and in any rethrow.
      String faultDesc = "(" + firstFault + ", " + secondFault + ")";

      LOG.info("\n\n-------------------------------------------\n" +
          "Beginning test, failing at " + faultDesc + "\n" +
          "-------------------------------------------\n\n");

      MiniJournalCluster cluster =
          new MiniJournalCluster.Builder(conf).build();
      QuorumJournalManager qjm = null;
      try {
        qjm = createInjectableQJM(cluster);
        qjm.format(FAKE_NSINFO);
        List<AsyncLogger> loggers =
            qjm.getLoggerSetForTests().getLoggersForTests();
        failIpcNumber(loggers.get(0), firstFault);
        failIpcNumber(loggers.get(1), secondFault);
        int lastAcked = doWorkload(cluster, qjm);

        if (lastAcked < 6) {
          LOG.info("Failed after injecting failures at " + faultDesc + 
              ". This is expected since we injected a failure in the " +
              "majority.");
        }
        qjm.close();
        qjm = null;

        // A brand-new writer must recover everything that was acked...
        qjm = createInjectableQJM(cluster);
        long lastRecovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
        assertTrue(lastRecovered >= lastAcked);

        // ...and then be able to keep writing from where recovery ended.
        writeSegment(cluster, qjm, lastRecovered + 1, 3, true);
      } catch (Throwable t) {
        // Attach the injection point so the failing combination is easy
        // to triage from the test report.
        throw new RuntimeException(
            "Test failed with injection: " + faultDesc, t);
      } finally {
        cluster.shutdown();
        cluster = null;
        IOUtils.closeStream(qjm);
        qjm = null;
      }
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:59,代码来源:TestQJMWithFaults.java

示例4: testMissFinalizeAndNextStart

import org.apache.hadoop.hdfs.qjournal.QJMTestUtil; //导入方法依赖的package包/类
/**
 * Test the case where one of the loggers misses a finalizeLogSegment()
 * call, and then misses the next startLogSegment() call before coming
 * back to life.
 *
 * Previously, this caused it to keep on writing to the old log segment,
 * such that one logger had eg edits_1-10 while the others had edits_1-5 and
 * edits_6-10. This caused recovery to fail in certain cases.
 */
@Test
public void testMissFinalizeAndNextStart() throws Exception {
  
  // Logger 0: miss finalize(1-3) and start(4)
  futureThrows(new IOException("injected")).when(spies.get(0))
    .finalizeLogSegment(Mockito.eq(1L), Mockito.eq(3L));
  futureThrows(new IOException("injected")).when(spies.get(0))
      .startLogSegment(Mockito.eq(4L),
          Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
  
  // Logger 1: fail at txn id 4
  failLoggerAtTxn(spies.get(1), 4L);
  
  // Write and finalize segment 1-3; logger 0 silently misses the finalize
  // (and the subsequent start) because of the stubs above.
  writeSegment(cluster, qjm, 1, 3, true);
  EditLogOutputStream stm = qjm.startLogSegment(4,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  try {
    writeTxns(stm, 4, 1);
    fail("Did not fail to write");
  } catch (QuorumException qe) {
    // Should fail, because logger 1 had an injected fault and
    // logger 0 should detect writer out of sync
    GenericTestUtils.assertExceptionContains("Writer out of sync",
        qe);
  } finally {
    stm.abort();
    qjm.close();
  }
  
  // State:
  // Logger 0: 1-3 in-progress (since it missed finalize)
  // Logger 1: 1-3 finalized
  // Logger 2: 1-3 finalized, 4 in-progress with one txn
  
  // Shut down logger 2 so it doesn't participate in recovery
  cluster.getJournalNode(2).stopAndJoin(0);
  
  // Recovery across loggers 0 and 1 must agree that the segment ends at
  // txid 3, despite logger 0's segment still being in-progress.
  qjm = createSpyingQJM();
  long recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
  assertEquals(3L, recovered);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:51,代码来源:TestQuorumJournalManager.java

示例5: testCrashBetweenSyncLogAndPersistPaxosData

import org.apache.hadoop.hdfs.qjournal.QJMTestUtil; //导入方法依赖的package包/类
/**
 * Test the case where journal nodes crash after syncing the edit log but
 * before persisting the corresponding Paxos recovery data, leaving JN0
 * with on-disk log data newer than its accepted Paxos state. A subsequent
 * recovery must still succeed and preserve every quorum-committed txn.
 */
@Test(timeout=20000)
public void testCrashBetweenSyncLogAndPersistPaxosData() throws Exception {
  JournalFaultInjector faultInjector =
      JournalFaultInjector.instance = Mockito.mock(JournalFaultInjector.class);

  setupLoggers345();

  // Run recovery where the client only talks to JN0, JN1, such that it
  // decides that the correct length is through txid 4.
  // Only allow it to call acceptRecovery() on JN0.
  qjm = createSpyingQJM();
  spies = qjm.getLoggerSetForTests().getLoggersForTests();    
  cluster.getJournalNode(2).stopAndJoin(0);
  injectIOE().when(spies.get(1)).acceptRecovery(
      Mockito.<SegmentStateProto>any(), Mockito.<URL>any());
  
  tryRecoveryExpectingFailure();

  cluster.restartJournalNode(2);
  
  // State at this point:
  // JN0: edit log for 1-4, paxos recovery data for txid 4
  // JN1: edit log for 1-4,
  // JN2: edit log for 1-5
  
  // Run recovery again, but don't allow JN0 to respond to the
  // prepareRecovery() call. This will cause recovery to decide
  // on txid 5.
  // Additionally, crash all of the nodes before they persist
  // any new paxos data.
  qjm = createSpyingQJM();
  spies = qjm.getLoggerSetForTests().getLoggersForTests();    
  injectIOE().when(spies.get(0)).prepareRecovery(Mockito.eq(1L));

  Mockito.doThrow(new IOException("Injected")).when(faultInjector)
    .beforePersistPaxosData();
  tryRecoveryExpectingFailure();
  Mockito.reset(faultInjector);
  
  // State at this point:
  // JN0: edit log for 1-5, paxos recovery data for txid 4
  // !!!   This is the interesting bit, above. The on-disk data and the
  //       paxos data don't match up!
  // JN1: edit log for 1-5,
  // JN2: edit log for 1-5,

  // Now, stop JN2, and see if we can still start up even though
  // JN0 is in a strange state where its log data is actually newer
  // than its accepted Paxos state.

  cluster.getJournalNode(2).stopAndJoin(0);
  
  qjm = createSpyingQJM();
  try {
    long recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
    assertTrue(recovered >= 4); // 4 was committed to a quorum
  } finally {
    qjm.close();
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:61,代码来源:TestQuorumJournalManager.java

示例6: testRecoverAfterDoubleFailures

import org.apache.hadoop.hdfs.qjournal.QJMTestUtil; //导入方法依赖的package包/类
/**
 * Sets up two of the nodes to each drop a single RPC, at all
 * possible combinations of RPCs. This may result in the
 * active writer failing to write. After this point, a new writer
 * should be able to recover and continue writing without
 * data loss.
 */
@Test
public void testRecoverAfterDoubleFailures() throws Exception {
  final long MAX_IPC_NUMBER = determineMaxIpcNumber();
  
  for (int failA = 1; failA <= MAX_IPC_NUMBER; failA++) {
    for (int failB = 1; failB <= MAX_IPC_NUMBER; failB++) {
      // Identifies this fault combination in the logs and in any rethrow.
      String injectionStr = "(" + failA + ", " + failB + ")";
      
      LOG.info("\n\n-------------------------------------------\n" +
          "Beginning test, failing at " + injectionStr + "\n" +
          "-------------------------------------------\n\n");
      
      MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
        .build();
      // Wait for all journal nodes to come up before injecting faults.
      cluster.waitActive();
      QuorumJournalManager qjm = null;
      try {
        qjm = createInjectableQJM(cluster);
        qjm.format(FAKE_NSINFO);
        List<AsyncLogger> loggers = qjm.getLoggerSetForTests().getLoggersForTests();
        failIpcNumber(loggers.get(0), failA);
        failIpcNumber(loggers.get(1), failB);
        int lastAckedTxn = doWorkload(cluster, qjm);

        if (lastAckedTxn < 6) {
          LOG.info("Failed after injecting failures at " + injectionStr + 
              ". This is expected since we injected a failure in the " +
              "majority.");
        }
        qjm.close();
        qjm = null;

        // Now should be able to recover: a fresh writer must see at least
        // everything that was acked, and then keep writing.
        qjm = createInjectableQJM(cluster);
        long lastRecoveredTxn = QJMTestUtil.recoverAndReturnLastTxn(qjm);
        assertTrue(lastRecoveredTxn >= lastAckedTxn);
        
        writeSegment(cluster, qjm, lastRecoveredTxn + 1, 3, true);
      } catch (Throwable t) {
        // Test failure! Rethrow with the test setup info so it can be
        // easily triaged.
        throw new RuntimeException("Test failed with injection: " + injectionStr,
              t); 
      } finally {
        cluster.shutdown();
        cluster = null;
        IOUtils.closeStream(qjm);
        qjm = null;
      }
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:60,代码来源:TestQJMWithFaults.java

示例7: testRecoverAfterDoubleFailures

import org.apache.hadoop.hdfs.qjournal.QJMTestUtil; //导入方法依赖的package包/类
/**
 * Sets up two of the nodes to each drop a single RPC, at all
 * possible combinations of RPCs. This may result in the
 * active writer failing to write. After this point, a new writer
 * should be able to recover and continue writing without
 * data loss.
 */
@Test
public void testRecoverAfterDoubleFailures() throws Exception {
  final long MAX_IPC_NUMBER = determineMaxIpcNumber();
  
  for (int failA = 1; failA <= MAX_IPC_NUMBER; failA++) {
    for (int failB = 1; failB <= MAX_IPC_NUMBER; failB++) {
      // Identifies this fault combination in the logs and in any rethrow.
      String injectionStr = "(" + failA + ", " + failB + ")";
      
      LOG.info("\n\n-------------------------------------------\n" +
          "Beginning test, failing at " + injectionStr + "\n" +
          "-------------------------------------------\n\n");
      
      MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
        .build();
      QuorumJournalManager qjm = null;
      try {
        qjm = createInjectableQJM(cluster);
        // This codebase formats the journal via a state transition rather
        // than a direct format() call.
        qjm.transitionJournal(FAKE_NSINFO, Transition.FORMAT,
            StartupOption.REGULAR);
        List<AsyncLogger> loggers = qjm.getLoggerSetForTests().getLoggersForTests();
        failIpcNumber(loggers.get(0), failA);
        failIpcNumber(loggers.get(1), failB);
        int lastAckedTxn = doWorkload(cluster, qjm);

        if (lastAckedTxn < 6) {
          LOG.info("Failed after injecting failures at " + injectionStr + 
              ". This is expected since we injected a failure in the " +
              "majority.");
        }
        qjm.close();
        qjm = null;

        // Now should be able to recover: a fresh writer must see at least
        // everything that was acked, and then keep writing.
        qjm = createInjectableQJM(cluster);
        long lastRecoveredTxn = QJMTestUtil.recoverAndReturnLastTxn(qjm);
        assertTrue(lastRecoveredTxn >= lastAckedTxn);
        
        writeSegment(cluster, qjm, lastRecoveredTxn + 1, 3, true);
      } catch (Throwable t) {
        // Test failure! Rethrow with the test setup info so it can be
        // easily triaged.
        throw new RuntimeException("Test failed with injection: " + injectionStr,
              t); 
      } finally {
        cluster.shutdown();
        cluster = null;
        IOUtils.closeStream(qjm);
        qjm = null;
      }
    }
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:60,代码来源:TestQJMWithFaults.java

示例8: testCrashBetweenSyncLogAndPersistPaxosData

import org.apache.hadoop.hdfs.qjournal.QJMTestUtil; //导入方法依赖的package包/类
/**
 * Test the case where journal nodes crash after syncing the edit log but
 * before persisting the corresponding Paxos recovery data, leaving JN0
 * with on-disk log data newer than its accepted Paxos state. A subsequent
 * recovery must still succeed and preserve every quorum-committed txn.
 */
@Test(timeout=20000)
public void testCrashBetweenSyncLogAndPersistPaxosData() throws Exception {
  JournalFaultInjector faultInjector =
      JournalFaultInjector.instance = Mockito.mock(JournalFaultInjector.class);

  setupLoggers345();

  // Run recovery where the client only talks to JN0, JN1, such that it
  // decides that the correct length is through txid 4.
  // Only allow it to call acceptRecovery() on JN0.
  qjm = createSpyingQJM();
  spies = qjm.getLoggerSetForTests().getLoggersForTests();    
  cluster.getJournalNode(2).stopAndJoin(0);
  injectIOE().when(spies.get(1)).acceptRecovery(
      Mockito.<SegmentStateProto>any(), Mockito.<String>any());
  
  tryRecoveryExpectingFailure();

  cluster.restartJournalNode(2);
  
  // State at this point:
  // JN0: edit log for 1-4, paxos recovery data for txid 4
  // JN1: edit log for 1-4,
  // JN2: edit log for 1-5
  
  // Run recovery again, but don't allow JN0 to respond to the
  // prepareRecovery() call. This will cause recovery to decide
  // on txid 5.
  // Additionally, crash all of the nodes before they persist
  // any new paxos data.
  qjm = createSpyingQJM();
  spies = qjm.getLoggerSetForTests().getLoggersForTests();    
  injectIOE().when(spies.get(0)).prepareRecovery(Mockito.eq(1L));

  Mockito.doThrow(new IOException("Injected")).when(faultInjector)
    .beforePersistPaxosData();
  tryRecoveryExpectingFailure();
  Mockito.reset(faultInjector);
  
  // State at this point:
  // JN0: edit log for 1-5, paxos recovery data for txid 4
  // !!!   This is the interesting bit, above. The on-disk data and the
  //       paxos data don't match up!
  // JN1: edit log for 1-5,
  // JN2: edit log for 1-5,

  // Now, stop JN2, and see if we can still start up even though
  // JN0 is in a strange state where its log data is actually newer
  // than its accepted Paxos state.

  cluster.getJournalNode(2).stopAndJoin(0);
  
  qjm = createSpyingQJM();
  try {
    long recovered = QJMTestUtil.recoverAndReturnLastTxn(qjm);
    assertTrue(recovered >= 4); // 4 was committed to a quorum
  } finally {
    qjm.close();
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:61,代码来源:TestQuorumJournalManager.java


注:本文中的org.apache.hadoop.hdfs.qjournal.QJMTestUtil.recoverAndReturnLastTxn方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。