

Java GenericTestUtils.assertExists Method Code Examples

This article collects typical usage examples of the org.apache.hadoop.test.GenericTestUtils.assertExists method in Java. If you are wondering what GenericTestUtils.assertExists does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.test.GenericTestUtils.


The following presents 10 code examples of the GenericTestUtils.assertExists method, ordered by popularity.
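For orientation before the examples: GenericTestUtils.assertExists takes a java.io.File and fails the current JUnit test with a descriptive message if the path does not exist. Below is a minimal sketch of the pattern in isolation; the test class name and the working directory used here are illustrative assumptions, not drawn from the Hadoop examples that follow.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;

// Hypothetical test class, for illustration only.
public class AssertExistsSketch {

  @Test
  public void testWorkDirExists() throws IOException {
    // Hypothetical working directory; real tests usually check a path
    // produced by the component under test (e.g. a formatted storage dir).
    File workDir = new File(
        System.getProperty("test.build.data", "target/test-dir"),
        "assert-exists-sketch");
    if (!workDir.mkdirs() && !workDir.isDirectory()) {
      throw new IOException("Could not create " + workDir);
    }

    // Fails the test with a descriptive message when the file is absent;
    // roughly equivalent to:
    //   assertTrue(workDir + " should exist", workDir.exists());
    GenericTestUtils.assertExists(workDir);
  }
}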

Example 1: assertEditFiles

import org.apache.hadoop.test.GenericTestUtils; // import the class the method depends on
/**
 * Check that the given list of edits files are present in the given storage
 * dirs.
 */
private void assertEditFiles(Iterable<URI> dirs, String ... files)
    throws IOException {
  for (URI u : dirs) {
    File editDirRoot = new File(u.getPath());
    File editDir = new File(editDirRoot, "current");
    GenericTestUtils.assertExists(editDir);
    if (files.length == 0) {
      LOG.info("Checking no edit files exist in " + editDir);
    } else {
      LOG.info("Checking for following edit files in " + editDir
          + ": " + Joiner.on(",").join(files));
    }
    
    GenericTestUtils.assertGlobEquals(editDir, "edits_.*", files);
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestEditLogsDuringFailover.java

Example 2: testSharedEditsMissingLogs

import org.apache.hadoop.test.GenericTestUtils; // import the class the method depends on
/**
 * Test for the case where the shared edits dir doesn't have
 * all of the recent edit logs.
 */
@Test
public void testSharedEditsMissingLogs() throws Exception {
  removeStandbyNameDirs();
  
  CheckpointSignature sig = nn0.getRpcServer().rollEditLog();
  assertEquals(3, sig.getCurSegmentTxId());
  
  // Should have created edits_1-2 in shared edits dir
  URI editsUri = cluster.getSharedEditsDir(0, 1);
  File editsDir = new File(editsUri);
  File editsSegment = new File(new File(editsDir, "current"),
      NNStorage.getFinalizedEditsFileName(1, 2));
  GenericTestUtils.assertExists(editsSegment);

  // Delete the segment.
  assertTrue(editsSegment.delete());
  
  // Trying to bootstrap standby should now fail since the edit
  // logs aren't available in the shared dir.
  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(BootstrapStandby.class));
  try {
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, rc);
  } finally {
    logs.stopCapturing();
  }
  GenericTestUtils.assertMatches(logs.getOutput(),
      "FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestBootstrapStandby.java

Example 3: testGenericKeysForNameNodeFormat

import org.apache.hadoop.test.GenericTestUtils; // import the class the method depends on
/**
 * HDFS-3013: NameNode format command doesn't pick up
 * dfs.namenode.name.dir.NameServiceId configuration.
 */
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
    throws IOException {
  Configuration conf = new HdfsConfiguration();

  // Set ephemeral ports 
  conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "127.0.0.1:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
      "127.0.0.1:0");
  
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  
  // Set a nameservice-specific configuration for name dir
  File dir = new File(MiniDFSCluster.getBaseDirectory(),
      "testGenericKeysForNameNodeFormat");
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
      dir.getAbsolutePath());
  
  // Format and verify the right dir is formatted.
  DFSTestUtil.formatNameNode(conf);
  GenericTestUtils.assertExists(dir);

  // Ensure that the same dir is picked up by the running NN
  NameNode nameNode = new NameNode(conf);
  nameNode.stop();
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestValidateConfigurationSettings.java

Example 4: testZeroBlockSize

import org.apache.hadoop.test.GenericTestUtils; // import the class the method depends on
/**
 * In this test case, I have created an image with a file having
 * preferredBlockSize = 0. We try to read this image, since a file with
 * preferredBlockSize = 0 was allowed before the 2.1.0-beta release, but a
 * NameNode at version 2.6 or later will not be able to read this
 * particular file. See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: TestFSImage.java

Example 5: testLoadLogsFromBuggyEarlierVersions

import org.apache.hadoop.test.GenericTestUtils; // import the class the method depends on
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused append(),
 * when called exactly at a block boundary, to not log an OP_ADD.
 * This test ensures that we can read logs from such buggy versions
 * correctly, by loading a namesystem image created with 0.23.1-rc2
 * that exhibits the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(0)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 43, Source: TestFileAppendRestart.java

Example 6: testJournalLocking

import org.apache.hadoop.test.GenericTestUtils; // import the class the method depends on
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
  Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
  StorageDirectory sd = journal.getStorage().getStorageDir(0);
  File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
  
  // Journal should be locked, since the format() call locks it.
  GenericTestUtils.assertExists(lockFile);

  journal.newEpoch(FAKE_NSINFO, 1);
  try {
    new Journal(conf, TEST_LOG_DIR, JID, StartupOption.REGULAR,
        mockErrorReporter);
    fail("Did not fail to create another journal in same dir");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot lock storage", ioe);
  }
  
  journal.close();
  
  // Journal should no longer be locked after the close() call.
  // Hence, should be able to create a new Journal in the same dir.
  Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID,
      StartupOption.REGULAR, mockErrorReporter);
  journal2.newEpoch(FAKE_NSINFO, 2);
  journal2.close();
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestJournal.java

Example 7: testAbortOldSegmentIfFinalizeIsMissed

import org.apache.hadoop.test.GenericTestUtils; // import the class the method depends on
/**
 * Assume that a client is writing to a journal, but loses its connection
 * in the middle of a segment. Thus, any future journal() calls in that
 * segment may fail, because some txns were missed while the connection was
 * down.
 *
 * Eventually, the connection comes back, and the NN tries to start a new
 * segment at a higher txid. This should abort the old one and succeed.
 */
@Test (timeout = 10000)
public void testAbortOldSegmentIfFinalizeIsMissed() throws Exception {
  journal.newEpoch(FAKE_NSINFO, 1);
  
  // Start a segment at txid 1, and write a batch of 3 txns.
  journal.startLogSegment(makeRI(1), 1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  journal.journal(makeRI(2), 1, 1, 3,
      QJMTestUtil.createTxnData(1, 3));

  GenericTestUtils.assertExists(
      journal.getStorage().getInProgressEditLog(1));
  
  // Try to start new segment at txid 6, this should abort old segment and
  // then succeed, allowing us to write txid 6-9.
  journal.startLogSegment(makeRI(3), 6,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  journal.journal(makeRI(4), 6, 6, 3,
      QJMTestUtil.createTxnData(6, 3));

  // The old segment should *not* be finalized.
  GenericTestUtils.assertExists(
      journal.getStorage().getInProgressEditLog(1));
  GenericTestUtils.assertExists(
      journal.getStorage().getInProgressEditLog(6));
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TestJournal.java

Example 8: testCheckpointWithFailedStorageDir

import org.apache.hadoop.test.GenericTestUtils; // import the class the method depends on
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);
    
    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);

    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();
    
    GenericTestUtils.assertExists(
        new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
    
    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 59, Source: TestCheckpoint.java

Example 9: testEarlierVersionEditLog

import org.apache.hadoop.test.GenericTestUtils; // import the class the method depends on
/**
 * Earlier versions of HDFS didn't persist block allocation to the edit log.
 * This makes sure that we can still load an edit log when the OP_CLOSE
 * is the opcode which adds all of the blocks. This is a regression
 * test for HDFS-2773.
 * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
 * which has a multi-block file. This is similar to the tests in
 * {@link TestDFSUpgradeFromImage} but none of those images include
 * a multi-block file.
 */
@Test
public void testEarlierVersionEditLog() throws Exception {
  final Configuration conf = new HdfsConfiguration();
      
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-1.0");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  File dataDir = new File(dfsDir, "data");
  GenericTestUtils.assertExists(dataDir);
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
  
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(1)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/user/todd/4blocks");
    // Read it without caring about the actual data within - we just need
    // to make sure that the block states and locations are OK.
    DFSTestUtil.readFile(fs, testPath);
    
    // Ensure that we can append to it - if the blocks were in some funny
    // state we'd get some kind of issue here. 
    FSDataOutputStream stm = fs.append(testPath);
    try {
      stm.write(1);
    } finally {
      IOUtils.closeStream(stm);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 58, Source: TestPersistBlocks.java

Example 10: testPurgeLogs

import org.apache.hadoop.test.GenericTestUtils; // import the class the method depends on
@Test
public void testPurgeLogs() throws Exception {
  for (int txid = 1; txid <= 5; txid++) {
    writeSegment(cluster, qjm, txid, 1, true);
  }
  File curDir = cluster.getCurrentDir(0, JID);
  GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
      NNStorage.getFinalizedEditsFileName(1, 1),
      NNStorage.getFinalizedEditsFileName(2, 2),
      NNStorage.getFinalizedEditsFileName(3, 3),
      NNStorage.getFinalizedEditsFileName(4, 4),
      NNStorage.getFinalizedEditsFileName(5, 5));
  File paxosDir = new File(curDir, "paxos");
  GenericTestUtils.assertExists(paxosDir);

  // Create new files in the paxos directory, which should get purged too.
  assertTrue(new File(paxosDir, "1").createNewFile());
  assertTrue(new File(paxosDir, "3").createNewFile());
  
  GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
      "1", "3");
  
  // Create some temporary files of the sort that are used during recovery.
  assertTrue(new File(curDir,
      "edits_inprogress_0000000000000000001.epoch=140").createNewFile());
  assertTrue(new File(curDir,
      "edits_inprogress_0000000000000000002.empty").createNewFile());
  
  qjm.purgeLogsOlderThan(3);
  
  // Log purging is asynchronous, so we have to wait for the calls
  // to be sent and respond before verifying.
  waitForAllPendingCalls(qjm.getLoggerSetForTests());
  
  // Older edits should be purged
  GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
      NNStorage.getFinalizedEditsFileName(3, 3),
      NNStorage.getFinalizedEditsFileName(4, 4),
      NNStorage.getFinalizedEditsFileName(5, 5));
 
  // Older paxos files should be purged
  GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
      "3");
}
 
开发者ID:naver,项目名称:hadoop,代码行数:45,代码来源:TestQuorumJournalManager.java


Note: The org.apache.hadoop.test.GenericTestUtils.assertExists examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code fragments were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.