

Java MiniDFSCluster.getBaseDirectory Method Code Examples

This article collects and organizes typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.getBaseDirectory. If you are unsure what MiniDFSCluster.getBaseDirectory does, how to use it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples of its containing class, org.apache.hadoop.hdfs.MiniDFSCluster.


The following presents 15 code examples of the MiniDFSCluster.getBaseDirectory method, sorted by popularity by default.
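Before diving into the examples: MiniDFSCluster.getBaseDirectory() is a static method that returns, as a String, the root directory under which MiniDFSCluster stores its test data. Tests typically create their own subdirectories beneath it and wire them into an HdfsConfiguration. The following is a minimal sketch of that common pattern, distilled from Examples 1 and 3 below; the class name and the "name" subdirectory are illustrative only, not taken from any of the cited projects.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GetBaseDirectoryUsageSketch {
  /** Builds a configuration whose NameNode metadata dir lives under the MiniDFSCluster base dir. */
  public static Configuration newConfUnderBaseDir() throws IOException {
    // getBaseDirectory() returns the per-test-run root used by MiniDFSCluster.
    File baseDir = new File(MiniDFSCluster.getBaseDirectory());

    // Start from a clean slate, as several of the examples below do.
    if (baseDir.exists() && !FileUtil.fullyDelete(baseDir)) {
      throw new IOException("Could not delete base directory " + baseDir);
    }

    // Point the NameNode's metadata directory at a subdirectory of the base dir.
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(baseDir, "name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
    return conf;
  }
}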

Example 1: testFSNamespaceClearLeases

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Test that FSNamesystem#clear clears all leases.
 */
@Test
public void testFSNamespaceClearLeases() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
  LeaseManager leaseMan = fsn.getLeaseManager();
  leaseMan.addLease("client1", "importantFile");
  assertEquals(1, leaseMan.countLease());
  fsn.clear();
  leaseMan = fsn.getLeaseManager();
  assertEquals(0, leaseMan.countLease());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: TestFSNamesystem.java

Example 2: testThatMatchingRPCandHttpPortsThrowException

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Tests that setting the RPC port to the same value as the HTTP port
 * throws a BindException when the NameNode tries to reuse the port.
 */
@Test(expected = BindException.class, timeout = 300000)
public void testThatMatchingRPCandHttpPortsThrowException() 
    throws IOException {

  NameNode nameNode = null;
  try {
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nameDir.getAbsolutePath());

    Random rand = new Random();
    final int port = 30000 + rand.nextInt(30000);

    // set both of these to the same port. It should fail.
    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port);
    DFSTestUtil.formatNameNode(conf);
    nameNode = new NameNode(conf);
  } finally {
    if (nameNode != null) {
      nameNode.stop();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 31, Source: TestValidateConfigurationSettings.java

Example 3: setUp

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory());

  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      new File(hdfsDir, "data").getPath());
  config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "secondary")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      WILDCARD_HTTP_HOST + "0");
  
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: TestStartup.java

Example 4: testInvalidateOverReplicatedBlock

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Test that an over-replicated block is invalidated when the replication
 * factor is decreased for a partially written block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: TestOverReplicatedBlocks.java

Example 5: MiniJournalCluster

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
private MiniJournalCluster(Builder b) throws IOException {
  LOG.info("Starting MiniJournalCluster with " +
      b.numJournalNodes + " journal nodes");
  
  if (b.baseDir != null) {
    this.baseDir = new File(b.baseDir);
  } else {
    this.baseDir = new File(MiniDFSCluster.getBaseDirectory());
  }

  nodes = new JNInfo[b.numJournalNodes];

  for (int i = 0; i < b.numJournalNodes; i++) {
    if (b.format) {
      File dir = getStorageDir(i);
      LOG.debug("Fully deleting JN directory " + dir);
      FileUtil.fullyDelete(dir);
    }
    JournalNode jn = new JournalNode();
    jn.setConf(createConfForNode(b, i));
    jn.start();
    nodes[i] = new JNInfo(jn);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source: MiniJournalCluster.java

Example 6: setup

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
@Before
public void setup() throws Exception {
  File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
      File.separator + "TestJournalNode");
  FileUtil.fullyDelete(editsDir);
  
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
      editsDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
      "0.0.0.0:0");
  jn = new JournalNode();
  jn.setConf(conf);
  jn.start();
  journalId = "test-journalid-" + GenericTestUtils.uniqueSequenceId();
  journal = jn.getOrCreateJournal(journalId);
  journal.format(FAKE_NSINFO);
  
  ch = new IPCLoggerChannel(conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: TestJournalNode.java

Example 7: testCheckpointCancellation

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Test cancellation of ongoing checkpoints when failover happens
 * mid-checkpoint. 
 */
@Test(timeout=120000)
public void testCheckpointCancellation() throws Exception {
  cluster.transitionToStandby(0);
  
  // Create an edit log in the shared edits dir with a lot
  // of mkdirs operations. This is solely so that the image is
  // large enough to take a non-trivial amount of time to load.
  // (only ~15MB)
  URI sharedUri = cluster.getSharedEditsDir(0, 1);
  File sharedDir = new File(sharedUri.getPath(), "current");
  File tmpDir = new File(MiniDFSCluster.getBaseDirectory(),
      "testCheckpointCancellation-tmp");
  FSNamesystem fsn = cluster.getNamesystem(0);
  FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3,
      fsn.getFSDirectory().getLastInodeId() + 1);
  String fname = NNStorage.getInProgressEditsFileName(3); 
  new File(tmpDir, fname).renameTo(new File(sharedDir, fname));

  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);

  cluster.transitionToActive(0);    
  
  boolean canceledOne = false;
  for (int i = 0; i < 10 && !canceledOne; i++) {
    
    doEdits(i*10, i*10 + 10);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
    canceledOne = StandbyCheckpointer.getCanceledCount() > 0;
  }
  
  assertTrue(canceledOne);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 44, Source: TestStandbyCheckpoints.java

Example 8: testNNThroughput

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
 */
@Test
public void testNNThroughput() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());
  FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  DFSTestUtil.formatNameNode(conf);
  String[] args = new String[] {"-op", "all"};
  NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 16, Source: TestNNThroughputBenchmark.java

Example 9: getConf

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
private Configuration getConf() throws IOException {
  String baseDir = MiniDFSCluster.getBaseDirectory();
  String nameDirs = fileAsURI(new File(baseDir, "name1")) + "," + 
                    fileAsURI(new File(baseDir, "name2"));

  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); 
  return conf;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source: TestSaveNamespace.java

Example 10: testSeparateEditsDirLocking

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Test that, if the edits dir is separate from the name dir, it is
 * properly locked.
 **/
@Test
public void testSeparateEditsDirLocking() throws IOException {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  File editsDir = new File(MiniDFSCluster.getBaseDirectory(),
      "testSeparateEditsDirLocking");

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      editsDir.getAbsolutePath());
  MiniDFSCluster cluster = null;
  
  // Start a NN, and verify that lock() fails in all of the configured
  // directories
  StorageDirectory savedSd = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
        .numDataNodes(0).build();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
      assertEquals(editsDir.getAbsoluteFile(), sd.getRoot());
      assertLockFails(sd);
      savedSd = sd;
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
  assertNotNull(savedSd);
  
  // Lock one of the saved directories, then start the NN, and make sure it
  // fails to start
  assertClusterStartFailsWhenDirLocked(conf, savedSd);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 40, Source: TestCheckpoint.java

Example 11: testThatDifferentRPCandHttpPortsAreOK

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Tests that setting the RPC port to a different value than the HTTP port
 * does NOT throw an exception.
 */
@Test(timeout = 300000)
public void testThatDifferentRPCandHttpPortsAreOK() 
    throws IOException {

  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());

  Random rand = new Random();

  // A few retries in case the ports we choose are in use.
  for (int i = 0; i < 5; ++i) {
    final int port1 = 30000 + rand.nextInt(10000);
    final int port2 = port1 + 1 + rand.nextInt(10000);

    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port2);
    DFSTestUtil.formatNameNode(conf);
    NameNode nameNode = null;

    try {
      nameNode = new NameNode(conf); // should be OK!
      break;
    } catch(BindException be) {
      continue;     // Port in use? Try another.
    } finally {
      if (nameNode != null) {
        nameNode.stop();
      }
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 38, Source: TestValidateConfigurationSettings.java

Example 12: testGenericKeysForNameNodeFormat

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * HDFS-3013: NameNode format command doesn't pick up
 * dfs.namenode.name.dir.NameServiceId configuration.
 */
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
    throws IOException {
  Configuration conf = new HdfsConfiguration();

  // Set ephemeral ports 
  conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "127.0.0.1:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
      "127.0.0.1:0");
  
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  
  // Set a nameservice-specific configuration for name dir
  File dir = new File(MiniDFSCluster.getBaseDirectory(),
      "testGenericKeysForNameNodeFormat");
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
      dir.getAbsolutePath());
  
  // Format and verify the right dir is formatted.
  DFSTestUtil.formatNameNode(conf);
  GenericTestUtils.assertExists(dir);

  // Ensure that the same dir is picked up by the running NN
  NameNode nameNode = new NameNode(conf);
  nameNode.stop();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source: TestValidateConfigurationSettings.java

Example 13: testSecondaryImageDownload

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Test that the secondary doesn't have to re-download image
 * if it hasn't changed.
 */
@Test
public void testSecondaryImageDownload() throws IOException {
  LOG.info("Starting testSecondaryImageDownload");
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  Path dir = new Path("/checkpoint");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                             .numDataNodes(numDatanodes)
                                             .format(true).build();
  cluster.waitActive();
  FileSystem fileSys = cluster.getFileSystem();
  FSImage image = cluster.getNameNode().getFSImage();
  SecondaryNameNode secondary = null;
  try {
    assertTrue(!fileSys.exists(dir));
    //
    // Make the checkpoint
    //
    secondary = startSecondaryNameNode(conf);

    File secondaryDir = new File(MiniDFSCluster.getBaseDirectory(), "namesecondary1");
    File secondaryCurrent = new File(secondaryDir, "current");

    long expectedTxIdToDownload = cluster.getNameNode().getFSImage()
        .getStorage().getMostRecentCheckpointTxId();

    File secondaryFsImageBefore = new File(secondaryCurrent,
        NNStorage.getImageFileName(expectedTxIdToDownload));
    File secondaryFsImageAfter = new File(secondaryCurrent,
        NNStorage.getImageFileName(expectedTxIdToDownload + 2));
    
    assertFalse("Secondary should start with empty current/ dir " +
        "but " + secondaryFsImageBefore + " exists",
        secondaryFsImageBefore.exists());

    assertTrue("Secondary should have loaded an image",
        secondary.doCheckpoint());
    
    assertTrue("Secondary should have downloaded original image",
        secondaryFsImageBefore.exists());
    assertTrue("Secondary should have created a new image",
        secondaryFsImageAfter.exists());
    
    long fsimageLength = secondaryFsImageBefore.length();
    assertEquals("Image size should not have changed",
        fsimageLength,
        secondaryFsImageAfter.length());

    // change namespace
    fileSys.mkdirs(dir);
    
    assertFalse("Another checkpoint should not have to re-load image",
        secondary.doCheckpoint());
    
    for (StorageDirectory sd :
      image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
      File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE,
          expectedTxIdToDownload + 5);
      assertTrue("Image size increased",
          imageFile.length() > fsimageLength);
    }

  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 75, Source: TestCheckpoint.java

Example 14: testReformatNNBetweenCheckpoints

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Test case where the name node is reformatted while the secondary namenode
 * is running. The secondary should shut itself down if it talks to a NN
 * with the wrong namespace.
 */
@Test
public void testReformatNNBetweenCheckpoints() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  
  Configuration conf = new HdfsConfiguration();
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      1);

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    int origPort = cluster.getNameNodePort();
    int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
    Configuration snnConf = new Configuration(conf);
    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
      "namesecondary");
    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      checkpointDir.getAbsolutePath());
    secondary = startSecondaryNameNode(snnConf);

    // secondary checkpoints once
    secondary.doCheckpoint();

    // we reformat primary NN
    cluster.shutdown();
    cluster = null;

    // Brief sleep to make sure that the 2NN's IPC connection to the NN
    // is dropped.
    try {
      Thread.sleep(100);
    } catch (InterruptedException ie) {
    }
    
    // Start a new NN with the same host/port.
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .nameNodePort(origPort)
        .nameNodeHttpPort(origHttpPort)
        .format(true).build();

    try {
      secondary.doCheckpoint();
      fail("Should have failed checkpoint against a different namespace");
    } catch (IOException ioe) {
      LOG.info("Got expected failure", ioe);
      assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }  
}
 
Developer ID: naver, Project: hadoop, Lines of code: 62, Source: TestCheckpoint.java

Example 15: testCheckpointWithSeparateDirsAfterNameFails

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }
    
    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 73, Source: TestCheckpoint.java


Note: The org.apache.hadoop.hdfs.MiniDFSCluster.getBaseDirectory method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their original authors, who retain copyright over the source; consult the corresponding project's license before distributing or using it. Do not reproduce without permission.