

Java NamenodeProtocols.restoreFailedStorage Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.restoreFailedStorage. If you are wondering what NamenodeProtocols.restoreFailedStorage does, how to use it, or where to find examples of it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.


The following presents 4 code examples of the NamenodeProtocols.restoreFailedStorage method, sorted by popularity by default.
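Before the full test cases, here is a minimal sketch of calling the method directly against a test cluster. This snippet is not taken from the examples below; it assumes a MiniDFSCluster (as the tests use) and relies on the ClientProtocol contract that restoreFailedStorage accepts the string arguments "check", "true", or "false" and returns the current value of the restore-failed-storage flag.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

public class RestoreFailedStorageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0).build();
    try {
      // The NameNode's RPC server implements NamenodeProtocols.
      NamenodeProtocols nn = cluster.getNameNodeRpc();

      // "check" only queries the flag without changing it.
      boolean enabled = nn.restoreFailedStorage("check");
      System.out.println("restore failed storage: " + enabled);

      // "true" tells the NameNode to attempt restoring failed storage
      // directories on the next checkpoint; "false" disables that behavior.
      nn.restoreFailedStorage("true");
    } finally {
      cluster.shutdown();
    }
  }
}

Outside of tests, the same switch is typically flipped from the command line with hdfs dfsadmin -restoreFailedStorage true|false|check.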

Example 1: testCheckpointWithFailedStorageDir

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);
    
    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);

    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();
    
    GenericTestUtils.assertExists(
        new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
    
    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 59, Source: TestCheckpoint.java

Example 2: testCheckpointWithSeparateDirsAfterNameFails

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }
    
    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 73, Source: TestCheckpoint.java

Example 3: testCheckpointWithFailedStorageDir

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@SuppressWarnings("deprecation")
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);
    
    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);

    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();
    
    GenericTestUtils.assertExists(
        new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
    
    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 60, Source: TestCheckpoint.java

Example 4: testCheckpointWithSeparateDirsAfterNameFails

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@SuppressWarnings("deprecation")
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  
  Configuration conf = new HdfsConfiguration();

  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }
    
    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();

    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();
    
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 74, Source: TestCheckpoint.java


Note: The org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.restoreFailedStorage method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their original authors, and copyright remains with those authors. Please consult the corresponding project's License before distributing or using the code; do not repost without permission.