

Java DelayAnswer Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.test.GenericTestUtils.DelayAnswer. If you are wondering what the DelayAnswer class does, how to use it, or what it looks like in real code, the curated examples below should help.


The DelayAnswer class belongs to the org.apache.hadoop.test.GenericTestUtils package. Fifteen code examples of the class are shown below, sorted by popularity by default.
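Before diving into the examples, here is a minimal sketch of the pattern most of them follow: install a DelayAnswer on a Mockito mock or spy, trigger the code under test, and then use waitForCall(), proceed(), and waitForResult() to pause and resume the mocked call from the test thread. The SomeService interface, its doSomething() method, and the commons-logging LOG below are illustrative assumptions for this sketch only and do not come from any of the examples.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.mockito.Mockito;

public class DelayAnswerSketch {
  private static final Log LOG = LogFactory.getLog(DelayAnswerSketch.class);

  /** Hypothetical collaborator used only to illustrate the pattern. */
  interface SomeService {
    void doSomething();
  }

  void demonstrate(SomeService mockService) throws Exception {
    DelayAnswer delayer = new DelayAnswer(LOG);
    // Every doSomething() call on the mock now blocks inside DelayAnswer
    // until the test thread calls proceed().
    Mockito.doAnswer(delayer).when(mockService).doSomething();

    // ... start the code under test on another thread so that it
    // eventually invokes mockService.doSomething() ...

    delayer.waitForCall();   // block until the mocked method has been entered
    // Assert on the "in progress" state here, e.g.
    // delayer.getFireCount() == 1 && delayer.getResultCount() == 0
    delayer.proceed();       // let the mocked call continue
    delayer.waitForResult(); // block until the mocked call has returned
  }
}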

Example 1: testQueueLimiting

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * Test that, once the queue eclipses the configured size limit,
 * calls to journal more data are rejected.
 */
@Test
public void testQueueLimiting() throws Exception {
  // Block the underlying fake proxy from actually completing any calls.
  DelayAnswer delayer = new DelayAnswer(LOG);
  Mockito.doAnswer(delayer).when(mockProxy).journal(
      Mockito.<RequestInfo>any(),
      Mockito.eq(1L), Mockito.eq(1L),
      Mockito.eq(1), Mockito.same(FAKE_DATA));
  
  // Queue up the maximum number of calls.
  int numToQueue = LIMIT_QUEUE_SIZE_BYTES / FAKE_DATA.length;
  for (int i = 1; i <= numToQueue; i++) {
    ch.sendEdits(1L, (long)i, 1, FAKE_DATA);
  }
  
  // The accounting should show the correct total number queued.
  assertEquals(LIMIT_QUEUE_SIZE_BYTES, ch.getQueuedEditsSize());
  
  // Trying to queue any more should fail.
  try {
    ch.sendEdits(1L, numToQueue + 1, 1, FAKE_DATA).get(1, TimeUnit.SECONDS);
    fail("Did not fail to queue more calls after queue was full");
  } catch (ExecutionException ee) {
    if (!(ee.getCause() instanceof LoggerTooFarBehindException)) {
      throw ee;
    }
  }
  
  delayer.proceed();

  // After we allow it to proceed, it should chug through the original queue
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ch.getQueuedEditsSize() == 0;
    }
  }, 10, 1000);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 43, Source: TestIPCLoggerChannel.java

Example 2: testQueueLimiting

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * Test that, once the queue eclipses the configured size limit,
 * calls to journal more data are rejected.
 */
@Test
public void testQueueLimiting() throws Exception {
  
  // Block the underlying fake proxy from actually completing any calls.
  DelayAnswer delayer = new DelayAnswer(LOG);
  Mockito.doAnswer(delayer).when(mockProxy).journal(
      Mockito.<JournalRequestInfo>any());
  
  // Queue up the maximum number of calls.
  int numToQueue = LIMIT_QUEUE_SIZE_BYTES / FAKE_DATA.length;
  for (int i = 1; i <= numToQueue; i++) {
    ch.sendEdits(1L, (long)i, 1, FAKE_DATA);
  }
  
  // The accounting should show the correct total number queued.
  assertEquals(LIMIT_QUEUE_SIZE_BYTES, ch.getQueuedEditsSize());
  
  // Trying to queue any more should fail.
  try {
    ch.sendEdits(1L, numToQueue + 1, 1, FAKE_DATA).get(1, TimeUnit.SECONDS);
    fail("Did not fail to queue more calls after queue was full");
  } catch (ExecutionException ee) {
    if (!(ee.getCause() instanceof LoggerTooFarBehindException)) {
      throw ee;
    }
  }
  
  delayer.proceed();

  // After we allow it to proceed, it should chug through the original queue
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ch.getQueuedEditsSize() == 0;
    }
  }, 10, 1000);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 42, Source: TestIPCLoggerChannel.java

Example 3: testStandbyExceptionThrownDuringCheckpoint

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * Make sure that clients will receive StandbyExceptions even when a
 * checkpoint is in progress on the SBN, and therefore the StandbyCheckpointer
 * thread will have FSNS lock. Regression test for HDFS-4591.
 */
@Test(timeout=300000)
public void testStandbyExceptionThrownDuringCheckpoint() throws Exception {
  
  // Set it up so that we know when the SBN checkpoint starts and ends.
  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
  DelayAnswer answerer = new DelayAnswer(LOG);
  Mockito.doAnswer(answerer).when(spyImage1)
      .saveNamespace(Mockito.any(FSNamesystem.class),
          Mockito.any(Canceler.class));
  
  // Perform some edits and wait for a checkpoint to start on the SBN.
  doEdits(0, 1000);
  nn0.getRpcServer().rollEditLog();
  answerer.waitForCall();
  answerer.proceed();
  assertTrue("SBN is not performing checkpoint but it should be.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  
  // Make sure that the lock has actually been taken by the checkpointing
  // thread.
  ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
  try {
    // Perform an RPC to the SBN and make sure it throws a StandbyException.
    nn1.getRpcServer().getFileInfo("/");
    fail("Should have thrown StandbyException, but instead succeeded.");
  } catch (StandbyException se) {
    GenericTestUtils.assertExceptionContains("is not supported", se);
  }
  
  // Make sure that the checkpoint is still going on, implying that the client
  // RPC to the SBN happened during the checkpoint.
  assertTrue("SBN should have still been checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  answerer.waitForResult();
  assertTrue("SBN should have finished checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 43, Source: TestStandbyCheckpoints.java

Example 4: testQueueLimiting

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * Test that, once the queue eclipses the configured size limit,
 * calls to journal more data are rejected.
 */
@Test
public void testQueueLimiting() throws Exception {
  
  // Block the underlying fake proxy from actually completing any calls.
  DelayAnswer delayer = new DelayAnswer(LOG);
  Mockito.doAnswer(delayer).when(mockProxy).journal(
      Mockito.<RequestInfo>any(),
      Mockito.eq(1L), Mockito.eq(1L),
      Mockito.eq(1), Mockito.same(FAKE_DATA));
  
  // Queue up the maximum number of calls.
  int numToQueue = LIMIT_QUEUE_SIZE_BYTES / FAKE_DATA.length;
  for (int i = 1; i <= numToQueue; i++) {
    ch.sendEdits(1L, (long)i, 1, FAKE_DATA);
  }
  
  // The accounting should show the correct total number queued.
  assertEquals(LIMIT_QUEUE_SIZE_BYTES, ch.getQueuedEditsSize());
  
  // Trying to queue any more should fail.
  try {
    ch.sendEdits(1L, numToQueue + 1, 1, FAKE_DATA).get(1, TimeUnit.SECONDS);
    fail("Did not fail to queue more calls after queue was full");
  } catch (ExecutionException ee) {
    if (!(ee.getCause() instanceof LoggerTooFarBehindException)) {
      throw ee;
    }
  }
  
  delayer.proceed();

  // After we allow it to proceed, it should chug through the original queue
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ch.getQueuedEditsSize() == 0;
    }
  }, 10, 1000);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 44, Source: TestIPCLoggerChannel.java

Example 5: testStandbyExceptionThrownDuringCheckpoint

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * Make sure that clients will receive StandbyExceptions even when a
 * checkpoint is in progress on the SBN, and therefore the StandbyCheckpointer
 * thread will have FSNS lock. Regression test for HDFS-4591.
 */
@Test(timeout=300000)
public void testStandbyExceptionThrownDuringCheckpoint() throws Exception {
  
  // Set it up so that we know when the SBN checkpoint starts and ends.
  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
  DelayAnswer answerer = new DelayAnswer(LOG);
  Mockito.doAnswer(answerer).when(spyImage1)
      .saveNamespace(Mockito.any(FSNamesystem.class),
          Mockito.eq(NameNodeFile.IMAGE), Mockito.any(Canceler.class));

  // Perform some edits and wait for a checkpoint to start on the SBN.
  doEdits(0, 1000);
  nn0.getRpcServer().rollEditLog();
  answerer.waitForCall();
  assertTrue("SBN is not performing checkpoint but it should be.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  
  // Make sure that the lock has actually been taken by the checkpointing
  // thread.
  ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
  try {
    // Perform an RPC to the SBN and make sure it throws a StandbyException.
    nn1.getRpcServer().getFileInfo("/");
    fail("Should have thrown StandbyException, but instead succeeded.");
  } catch (StandbyException se) {
    GenericTestUtils.assertExceptionContains("is not supported", se);
  }
  
  // Make sure that the checkpoint is still going on, implying that the client
  // RPC to the SBN happened during the checkpoint.
  assertTrue("SBN should have still been checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  answerer.proceed();
  answerer.waitForResult();
  assertTrue("SBN should have finished checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
}
 
Developer ID: yncxcw, Project: FlexMap, Lines of code: 43, Source: TestStandbyCheckpoints.java

Example 6: testRBWReportArrivesAfterEdits

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * Another regression test for HDFS-2742. This tests the following sequence:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - The block report is delayed in reaching the standby.
 * - The file is closed.
 * - The standby processes the OP_ADD and OP_CLOSE operations before
 *   the RBW block report arrives.
 * - The standby should not mark the block as corrupt.
 */
@Test
public void testRBWReportArrivesAfterEdits() throws Exception {
  final CountDownLatch brFinished = new CountDownLatch(1);
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {
    @Override
    protected Object passThrough(InvocationOnMock invocation)
        throws Throwable {
      try {
        return super.passThrough(invocation);
      } finally {
        // inform the test that our block report went through.
        brFinished.countDown();
      }
    }
  };

  FSDataOutputStream out = fs.create(TEST_FILE_PATH);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();

    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy =
      DataNodeTestUtils.spyOnBposToNN(dn, nn2);
    
    Mockito.doAnswer(delayer)
      .when(spy).blockReport(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageBlockReport[]>anyObject(),
        Mockito.<BlockReportContext>anyObject());
    dn.scheduleAllBlockReport(0);
    delayer.waitForCall();
    
  } finally {
    IOUtils.closeStream(out);
  }

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  
  delayer.proceed();
  brFinished.await();
  
  // Verify that no replicas are marked corrupt, and that the
  // file is readable from the failed-over standby.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
  
  DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 64, Source: TestDNFencing.java

Example 7: testStandbyExceptionThrownDuringCheckpoint

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * Make sure that clients will receive StandbyExceptions even when a
 * checkpoint is in progress on the SBN, and therefore the StandbyCheckpointer
 * thread will have FSNS lock. Regression test for HDFS-4591.
 */
@Test(timeout=300000)
public void testStandbyExceptionThrownDuringCheckpoint() throws Exception {
  
  // Set it up so that we know when the SBN checkpoint starts and ends.
  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
  DelayAnswer answerer = new DelayAnswer(LOG);
  Mockito.doAnswer(answerer).when(spyImage1)
      .saveNamespace(Mockito.any(FSNamesystem.class),
          Mockito.eq(NameNodeFile.IMAGE), Mockito.any(Canceler.class));

  // Perform some edits and wait for a checkpoint to start on the SBN.
  doEdits(0, 1000);
  nn0.getRpcServer().rollEditLog();
  answerer.waitForCall();
  assertTrue("SBN is not performing checkpoint but it should be.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  
  // Make sure that the lock has actually been taken by the checkpointing
  // thread.
  ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
  try {
    // Perform an RPC to the SBN and make sure it throws a StandbyException.
    nn1.getRpcServer().getFileInfo("/");
    fail("Should have thrown StandbyException, but instead succeeded.");
  } catch (StandbyException se) {
    GenericTestUtils.assertExceptionContains("is not supported", se);
  }

  // Make sure new incremental block reports are processed during
  // checkpointing on the SBN.
  assertEquals(0, cluster.getNamesystem(1).getPendingDataNodeMessageCount());
  doCreate();
  Thread.sleep(1000);
  assertTrue(cluster.getNamesystem(1).getPendingDataNodeMessageCount() > 0);
  
  // Make sure that the checkpoint is still going on, implying that the client
  // RPC to the SBN happened during the checkpoint.
  assertTrue("SBN should have still been checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  answerer.proceed();
  answerer.waitForResult();
  assertTrue("SBN should have finished checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 50, Source: TestStandbyCheckpoints.java

Example 8: testReadsAllowedDuringCheckpoint

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
@Test(timeout=300000)
public void testReadsAllowedDuringCheckpoint() throws Exception {
  
  // Set it up so that we know when the SBN checkpoint starts and ends.
  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
  DelayAnswer answerer = new DelayAnswer(LOG);
  Mockito.doAnswer(answerer).when(spyImage1)
      .saveNamespace(Mockito.any(FSNamesystem.class),
          Mockito.any(NameNodeFile.class),
          Mockito.any(Canceler.class));
  
  // Perform some edits and wait for a checkpoint to start on the SBN.
  doEdits(0, 1000);
  nn0.getRpcServer().rollEditLog();
  answerer.waitForCall();
  assertTrue("SBN is not performing checkpoint but it should be.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  
  // Make sure that the lock has actually been taken by the checkpointing
  // thread.
  ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
  
  // Perform an RPC that needs to take the write lock.
  Thread t = new Thread() {
    @Override
    public void run() {
      try {
        nn1.getRpcServer().restoreFailedStorage("false");
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  };
  t.start();
  
  // Make sure that our thread is waiting for the lock.
  ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
  
  assertFalse(nn1.getNamesystem().getFsLockForTests().hasQueuedThreads());
  assertFalse(nn1.getNamesystem().getFsLockForTests().isWriteLocked());
  assertTrue(nn1.getNamesystem().getCpLockForTests().hasQueuedThreads());
  
  // Get /jmx of the standby NN web UI, which will cause the FSNS read lock to
  // be taken.
  String pageContents = DFSTestUtil.urlGet(new URL("http://" +
      nn1.getHttpAddress().getHostName() + ":" +
      nn1.getHttpAddress().getPort() + "/jmx"));
  assertTrue(pageContents.contains("NumLiveDataNodes"));
  
  // Make sure that the checkpoint is still going on, implying that the client
  // RPC to the SBN happened during the checkpoint.
  assertTrue("SBN should have still been checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  answerer.proceed();
  answerer.waitForResult();
  assertTrue("SBN should have finished checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
  
  t.join();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 61, Source: TestStandbyCheckpoints.java

Example 9: testCancelSaveNamespace

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
@Test(timeout=20000)
public void testCancelSaveNamespace() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  // Replace the FSImage with a spy
  final FSImage image = fsn.getFSImage();
  NNStorage storage = image.getStorage();
  storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
  storage.setStorageDirectories(
      FSNamesystem.getNamespaceDirs(conf), 
      FSNamesystem.getNamespaceEditsDirs(conf));

  FSNamesystem spyFsn = spy(fsn);
  final FSNamesystem finalFsn = spyFsn;
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
  BlockIdManager bid = spy(spyFsn.getBlockIdManager());
  Whitebox.setInternalState(finalFsn, "blockIdManager", bid);
  doAnswer(delayer).when(bid).getGenerationStampV2();

  ExecutorService pool = Executors.newFixedThreadPool(2);
  
  try {
    doAnEdit(fsn, 1);
    final Canceler canceler = new Canceler();
    
    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      Future<Void> saverFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          image.saveNamespace(finalFsn, NameNodeFile.IMAGE, canceler);
          return null;
        }
      });

      // Wait until saveNamespace calls getGenerationStamp
      delayer.waitForCall();
      // then cancel the saveNamespace
      Future<Void> cancelFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          canceler.cancel("cancelled");
          return null;
        }
      });
      // give the cancel call time to run
      Thread.sleep(500);
      
      // allow saveNamespace to proceed - it should check the cancel flag after
      // this point and throw an exception
      delayer.proceed();

      cancelFuture.get();
      saverFuture.get();
      fail("saveNamespace did not fail even though cancelled!");
    } catch (Throwable t) {
      GenericTestUtils.assertExceptionContains(
          "SaveNamespaceCancelledException", t);
    }
    LOG.info("Successfully cancelled a saveNamespace");


    // Check that we have only the original image and not any
    // cruft left over from half-finished images
    FSImageTestUtil.logStorageContents(LOG, storage);
    for (StorageDirectory sd : storage.dirIterable(null)) {
      File curDir = sd.getCurrentDir();
      GenericTestUtils.assertGlobEquals(curDir, "fsimage_.*",
          NNStorage.getImageFileName(0),
          NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);
    }      
  } finally {
    fsn.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 80, Source: TestSaveNamespace.java

Example 10: testOneReplicaRbwReportArrivesAfterBlockCompleted

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * Test for the case where one of the DNs in the pipeline is in the
 * process of doing a block report exactly when the block is closed.
 * In this case, the block report becomes delayed until after the
 * block is marked completed on the NN, and hence it reports an RBW
 * replica for a COMPLETE block. Such a report should not be marked
 * corrupt.
 * This is a regression test for HDFS-2791.
 */
@Test(timeout=300000)
public void testOneReplicaRbwReportArrivesAfterBlockCompleted() throws Exception {
  final CountDownLatch brFinished = new CountDownLatch(1);
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {
    @Override
    protected Object passThrough(InvocationOnMock invocation)
        throws Throwable {
      try {
        return super.passThrough(invocation);
      } finally {
        // inform the test that our block report went through.
        brFinished.countDown();
      }
    }
  };

  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");

  // Start a second DN for this test -- we're checking
  // what happens when one of the DNs is slowed for some reason.
  REPL_FACTOR = 2;
  startDNandWait(null, false);

  NameNode nn = cluster.getNameNode();

  FSDataOutputStream out = fs.create(filePath, REPL_FACTOR);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();

    // Set up a spy so that we can delay the block report coming
    // from this node.
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy =
      DataNodeTestUtils.spyOnBposToNN(dn, nn);

    Mockito.doAnswer(delayer)
      .when(spy).blockReport(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageBlockReport[]>anyObject(),
        Mockito.<BlockReportContext>anyObject());

    // Force a block report to be generated. The block report will have
    // an RBW replica in it. Wait for the RPC to be sent, but block
    // it before it gets to the NN.
    dn.scheduleAllBlockReport(0);
    delayer.waitForCall();

  } finally {
    IOUtils.closeStream(out);
  }

  // Now that the stream is closed, the NN will have the block in COMPLETE
  // state.
  delayer.proceed();
  brFinished.await();

  // Verify that no replicas are marked corrupt, and that the
  // file is still readable.
  BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
  assertEquals(0, nn.getNamesystem().getCorruptReplicaBlocks());
  DFSTestUtil.readFile(fs, filePath);

  // Ensure that the file is readable even from the DN that we futzed with.
  cluster.stopDataNode(1);
  DFSTestUtil.readFile(fs, filePath);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 79, Source: BlockReportTestBase.java

Example 11: testStandbyExceptionThrownDuringCheckpoint

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * Make sure that clients will receive StandbyExceptions even when a
 * checkpoint is in progress on the SBN, and therefore the StandbyCheckpointer
 * thread will have FSNS lock. Regression test for HDFS-4591.
 */
@Test(timeout=300000)
public void testStandbyExceptionThrownDuringCheckpoint() throws Exception {
  
  // Set it up so that we know when the SBN checkpoint starts and ends.
  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nns[1]);
  DelayAnswer answerer = new DelayAnswer(LOG);
  Mockito.doAnswer(answerer).when(spyImage1)
      .saveNamespace(Mockito.any(FSNamesystem.class),
          Mockito.eq(NameNodeFile.IMAGE), Mockito.any(Canceler.class));

  // Perform some edits and wait for a checkpoint to start on the SBN.
  doEdits(0, 1000);
  nns[0].getRpcServer().rollEditLog();
  answerer.waitForCall();
  assertTrue("SBN is not performing checkpoint but it should be.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  
  // Make sure that the lock has actually been taken by the checkpointing
  // thread.
  ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
  try {
    // Perform an RPC to the SBN and make sure it throws a StandbyException.
    nns[1].getRpcServer().getFileInfo("/");
    fail("Should have thrown StandbyException, but instead succeeded.");
  } catch (StandbyException se) {
    GenericTestUtils.assertExceptionContains("is not supported", se);
  }

  // Make sure new incremental block reports are processed during
  // checkpointing on the SBN.
  assertEquals(0, cluster.getNamesystem(1).getPendingDataNodeMessageCount());
  doCreate();
  Thread.sleep(1000);
  assertTrue(cluster.getNamesystem(1).getPendingDataNodeMessageCount() > 0);
  
  // Make sure that the checkpoint is still going on, implying that the client
  // RPC to the SBN happened during the checkpoint.
  assertTrue("SBN should have still been checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  answerer.proceed();
  answerer.waitForResult();
  assertTrue("SBN should have finished checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 50, Source: TestStandbyCheckpoints.java

Example 12: testReadsAllowedDuringCheckpoint

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
@Test(timeout=300000)
public void testReadsAllowedDuringCheckpoint() throws Exception {
  
  // Set it up so that we know when the SBN checkpoint starts and ends.
  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nns[1]);
  DelayAnswer answerer = new DelayAnswer(LOG);
  Mockito.doAnswer(answerer).when(spyImage1)
      .saveNamespace(Mockito.any(FSNamesystem.class),
          Mockito.any(NameNodeFile.class),
          Mockito.any(Canceler.class));
  
  // Perform some edits and wait for a checkpoint to start on the SBN.
  doEdits(0, 1000);
  nns[0].getRpcServer().rollEditLog();
  answerer.waitForCall();
  assertTrue("SBN is not performing checkpoint but it should be.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  
  // Make sure that the lock has actually been taken by the checkpointing
  // thread.
  ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
  
  // Perform an RPC that needs to take the write lock.
  Thread t = new Thread() {
    @Override
    public void run() {
      try {
        nns[1].getRpcServer().restoreFailedStorage("false");
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  };
  t.start();
  
  // Make sure that our thread is waiting for the lock.
  ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);

  assertFalse(nns[1].getNamesystem().getFsLockForTests().hasQueuedThreads());
  assertFalse(nns[1].getNamesystem().getFsLockForTests().isWriteLocked());
  assertTrue(nns[1].getNamesystem().getCpLockForTests().hasQueuedThreads());

  // Get /jmx of the standby NN web UI, which will cause the FSNS read lock to
  // be taken.
  String pageContents = DFSTestUtil.urlGet(new URL("http://" +
      nns[1].getHttpAddress().getHostName() + ":" +
      nns[1].getHttpAddress().getPort() + "/jmx"));
  assertTrue(pageContents.contains("NumLiveDataNodes"));
  
  // Make sure that the checkpoint is still going on, implying that the client
  // RPC to the SBN happened during the checkpoint.
  assertTrue("SBN should have still been checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
  answerer.proceed();
  answerer.waitForResult();
  assertTrue("SBN should have finished checkpointing.",
      answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
  
  t.join();
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 61, Source: TestStandbyCheckpoints.java

Example 13: testNoExtraReplicationWhenBlockReceivedIsLate

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * This test makes sure that, when a file is closed before all
 * of the datanodes in the pipeline have reported their replicas,
 * the NameNode doesn't consider the block under-replicated too
 * aggressively. It is a regression test for HDFS-1172.
 */
@Test(timeout=60000)
public void testNoExtraReplicationWhenBlockReceivedIsLate()
    throws Exception {
  LOG.info("Test block replication when blockReceived is late");
  final short numDataNodes = 3;
  final short replication = 3;
  final Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDataNodes).build();
  final String testFile = "/replication-test-file";
  final Path testPath = new Path(testFile);
  final BlockManager bm =
      cluster.getNameNode().getNamesystem().getBlockManager();

  try {
    cluster.waitActive();

    // Artificially delay IBR from 1 DataNode.
    // this ensures that the client's completeFile() RPC will get to the
    // NN before some of the replicas are reported.
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy =
        DataNodeTestUtils.spyOnBposToNN(dn, nn);
    DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spy).blockReceivedAndDeleted(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageReceivedDeletedBlocks[]>anyObject());

    FileSystem fs = cluster.getFileSystem();
    // Create and close a small file with two blocks
    DFSTestUtil.createFile(fs, testPath, 1500, replication, 0);

    // schedule replication via BlockManager#computeReplicationWork
    BlockManagerTestUtil.computeAllPendingWork(bm);

    // Initially, should have some pending replication since the close()
    // is earlier than at least one of the reportReceivedDeletedBlocks calls
    assertTrue(pendingReplicationCount(bm) > 0);

    // release pending IBR.
    delayer.waitForCall();
    delayer.proceed();
    delayer.waitForResult();

    // make sure DataNodes do replication work if exists
    for (DataNode d : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(d);
    }

    // Wait until there is nothing pending
    try {
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          return pendingReplicationCount(bm) == 0;
        }
      }, 100, 3000);
    } catch (TimeoutException e) {
      fail("timed out while waiting for no pending replication.");
    }

    // Check that none of the datanodes have serviced a replication request.
    // i.e. that the NameNode didn't schedule any spurious replication.
    assertNoReplicationWasPerformed(cluster);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 80, Source: TestReplication.java

Example 14: testCancelSaveNamespace

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
@Test(timeout=20000)
public void testCancelSaveNamespace() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  // Replace the FSImage with a spy
  final FSImage image = fsn.getFSImage();
  NNStorage storage = image.getStorage();
  storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
  storage.setStorageDirectories(
      FSNamesystem.getNamespaceDirs(conf), 
      FSNamesystem.getNamespaceEditsDirs(conf));

  FSNamesystem spyFsn = spy(fsn);
  final FSNamesystem finalFsn = spyFsn;
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
  doAnswer(delayer).when(spyFsn).getGenerationStampV2();
  
  ExecutorService pool = Executors.newFixedThreadPool(2);
  
  try {
    doAnEdit(fsn, 1);
    final Canceler canceler = new Canceler();
    
    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      Future<Void> saverFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          image.saveNamespace(finalFsn, NameNodeFile.IMAGE, canceler);
          return null;
        }
      });

      // Wait until saveNamespace calls getGenerationStamp
      delayer.waitForCall();
      // then cancel the saveNamespace
      Future<Void> cancelFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          canceler.cancel("cancelled");
          return null;
        }
      });
      // give the cancel call time to run
      Thread.sleep(500);
      
      // allow saveNamespace to proceed - it should check the cancel flag after
      // this point and throw an exception
      delayer.proceed();

      cancelFuture.get();
      saverFuture.get();
      fail("saveNamespace did not fail even though cancelled!");
    } catch (Throwable t) {
      GenericTestUtils.assertExceptionContains(
          "SaveNamespaceCancelledException", t);
    }
    LOG.info("Successfully cancelled a saveNamespace");


    // Check that we have only the original image and not any
    // cruft left over from half-finished images
    FSImageTestUtil.logStorageContents(LOG, storage);
    for (StorageDirectory sd : storage.dirIterable(null)) {
      File curDir = sd.getCurrentDir();
      GenericTestUtils.assertGlobEquals(curDir, "fsimage_.*",
          NNStorage.getImageFileName(0),
          NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);
    }      
  } finally {
    if (fsn != null) {
      fsn.close();
    }
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 80, Source: TestSaveNamespace.java

Example 15: testRBWReportArrivesAfterEdits

import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; // import the required package/class
/**
 * Another regression test for HDFS-2742. This tests the following sequence:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - The block report is delayed in reaching the standby.
 * - The file is closed.
 * - The standby processes the OP_ADD and OP_CLOSE operations before
 *   the RBW block report arrives.
 * - The standby should not mark the block as corrupt.
 */
@Test
public void testRBWReportArrivesAfterEdits() throws Exception {
  final CountDownLatch brFinished = new CountDownLatch(1);
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {
    @Override
    protected Object passThrough(InvocationOnMock invocation)
        throws Throwable {
      try {
        return super.passThrough(invocation);
      } finally {
        // inform the test that our block report went through.
        brFinished.countDown();
      }
    }
  };

  FSDataOutputStream out = fs.create(TEST_FILE_PATH);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();

    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy =
      DataNodeTestUtils.spyOnBposToNN(dn, nn2);
    
    Mockito.doAnswer(delayer)
      .when(spy).blockReport(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageBlockReport[]>anyObject());
    dn.scheduleAllBlockReport(0);
    delayer.waitForCall();
    
  } finally {
    IOUtils.closeStream(out);
  }

  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  
  delayer.proceed();
  brFinished.await();
  
  // Verify that no replicas are marked corrupt, and that the
  // file is readable from the failed-over standby.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
  
  DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 63, Source: TestDNFencing.java


Note: The org.apache.hadoop.test.GenericTestUtils.DelayAnswer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and the source code copyright belongs to the original authors. Refer to each project's license before distributing or reusing the code; do not reproduce this article without permission.