

Java NameNodeAdapter.leaveSafeMode Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.leaveSafeMode, drawn from open-source projects. If you are wondering what NameNodeAdapter.leaveSafeMode does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.


The following 13 code examples of the NameNodeAdapter.leaveSafeMode method are listed below, sorted by popularity by default.
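All of the examples rely on the same checkpointing idiom: manually put the NameNode into safe mode, save the namespace (writing a new fsimage), then leave safe mode so normal operation resumes. A minimal sketch of that idiom follows; it assumes a running MiniDFSCluster instance named cluster, which is hypothetical here and only used for illustration.

import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

// Minimal sketch of the checkpoint idiom used throughout the examples below.
// Assumes a running MiniDFSCluster named "cluster" (hypothetical setup, not shown here).
NameNode nameNode = cluster.getNameNode();
NameNodeAdapter.enterSafeMode(nameNode, false); // manually enter safe mode
NameNodeAdapter.saveNamespace(nameNode);        // checkpoint: write a new fsimage
NameNodeAdapter.leaveSafeMode(nameNode);        // exit safe mode and resume normal operation

Most of the tests then restart the NameNode (for example via cluster.restartNameNode(true)) to verify that the state saved in the checkpoint is loaded back correctly.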

Example 1: testBlocksDeletedInEditLog

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
/**
 * Regression test for a bug experienced while developing
 * HDFS-2742. The scenario here is:
 * - image contains some blocks
 * - edits log contains at least one block addition, followed
 *   by deletion of more blocks than were added.
 * - When node starts up, some incorrect accounting of block
 *   totals caused an assertion failure.
 */
@Test
public void testBlocksDeletedInEditLog() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  // Make 4 blocks persisted in the image.
  DFSTestUtil.createFile(fs, new Path("/test"),
      4*BLOCK_SIZE, (short) 3, 1L);
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);
  
  // OP_ADD for 2 blocks
  DFSTestUtil.createFile(fs, new Path("/test2"),
      2*BLOCK_SIZE, (short) 3, 1L);
  
  // OP_DELETE for 4 blocks
  fs.delete(new Path("/test"), true);

  restartActive();
}
 
Developer: naver; Project: hadoop; Lines: 29; Source: TestHASafeMode.java

Example 2: testReadSnapshotFileWithCheckpoint

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
  Path foo = new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  Path bar = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  assertTrue(hdfs.delete(bar, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);

  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
 
Developer: naver; Project: hadoop; Lines: 22; Source: TestSnapshotBlocksMap.java

Example 3: testWithCheckpoint

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
@Test
public void testWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  
  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
 
Developer: naver; Project: hadoop; Lines: 20; Source: TestOpenFilesWithSnapshot.java

Example 4: testFilesDeletionWithCheckpoint

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
@Test
public void testFilesDeletionWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test/test2"), true);
  fs.delete(new Path("/test/test/test3"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  
  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
 
Developer: naver; Project: hadoop; Lines: 21; Source: TestOpenFilesWithSnapshot.java

Example 5: doTestMultipleSnapshots

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
private void doTestMultipleSnapshots(boolean saveNamespace)
    throws IOException {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.createSnapshot(path, "s2");
  fs.delete(new Path("/test/test"), true);
  fs.deleteSnapshot(path, "s2");
  cluster.triggerBlockReports();
  if (saveNamespace) {
    NameNode nameNode = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
    NameNodeAdapter.leaveSafeMode(nameNode);
  }
  cluster.restartNameNode(true);
}
 
Developer: naver; Project: hadoop; Lines: 17; Source: TestOpenFilesWithSnapshot.java

Example 6: testOpenFilesWithRename

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
 
Developer: naver; Project: hadoop; Lines: 24; Source: TestOpenFilesWithSnapshot.java

Example 7: testOpenFilesWithRename

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);

  // check for zero sized blocks
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // create one empty block
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");

  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 24; Source: TestOpenFilesWithSnapshot.java

Example 8: testDownloadingLaterCheckpoint

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
/**
 * Test for downloading a checkpoint made at a later checkpoint
 * from the active.
 */
@Test
public void testDownloadingLaterCheckpoint() throws Exception {
  // Roll edit logs a few times to inflate txid
  nn0.getRpcServer().rollEditLog();
  nn0.getRpcServer().rollEditLog();
  // Make checkpoint
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
    .getFSImage().getMostRecentCheckpointTxId();
  assertEquals(6, expectedCheckpointTxId);

  int rc = BootstrapStandby.run(
      new String[]{"-force"},
      cluster.getConfiguration(1));
  assertEquals(0, rc);
  
  // Should have copied over the namespace from the active
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
      ImmutableList.of((int)expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);

  // We should now be able to start the standby successfully.
  cluster.restartNameNode(1);
}
 
Developer: naver; Project: hadoop; Lines: 31; Source: TestBootstrapStandby.java

Example 9: testReadRenamedSnapshotFileWithCheckpoint

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
@Test(timeout = 30000)
public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception {
  final Path foo = new Path("/foo");
  final Path foo2 = new Path("/foo2");
  hdfs.mkdirs(foo);
  hdfs.mkdirs(foo2);

  hdfs.allowSnapshot(foo);
  hdfs.allowSnapshot(foo2);
  final Path bar = new Path(foo, "bar");
  final Path bar2 = new Path(foo2, "bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  // rename to another snapshottable directory and take snapshot
  assertTrue(hdfs.rename(bar, bar2));
  hdfs.createSnapshot(foo2, "s2");
  // delete the original renamed file to make sure blocks are not updated by
  // the original file
  assertTrue(hdfs.delete(bar2, true));

  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  // file in first snapshot
  String barSnapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(barSnapshotPath));
  // file in second snapshot after rename+delete
  String bar2SnapshotPath = Snapshot.getSnapshotPath(foo2.toString(),
      "s2/bar");
  DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath));
}
 
Developer: naver; Project: hadoop; Lines: 36; Source: TestSnapshotBlocksMap.java

Example 10: testDownloadingLaterCheckpoint

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
/**
 * Test for downloading a checkpoint made at a later checkpoint
 * from the active.
 */
@Test
public void testDownloadingLaterCheckpoint() throws Exception {
  // Roll edit logs a few times to inflate txid
  nn0.getRpcServer().rollEditLog();
  nn0.getRpcServer().rollEditLog();
  // Make checkpoint
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
      .getFSImage().getMostRecentCheckpointTxId();
  assertEquals(6, expectedCheckpointTxId);

  // advance the current txid
  cluster.getFileSystem(0).create(new Path("/test_txid"), (short)1).close();

  // obtain the content of seen_txid
  URI editsUri = cluster.getSharedEditsDir(0, maxNNCount - 1);
  long seen_txid_shared = FSImageTestUtil.getStorageTxId(nn0, editsUri);

  for (int i = 1; i < maxNNCount; i++) {
    assertEquals(0, forceBootstrap(i));

    // Should have copied over the namespace from the active
    LOG.info("Checking namenode: " + i);
    FSImageTestUtil.assertNNHasCheckpoints(cluster, i,
        ImmutableList.of((int) expectedCheckpointTxId));
  }
  FSImageTestUtil.assertNNFilesMatch(cluster);

  // Make sure the seen_txid was not modified by the standby
  assertEquals(seen_txid_shared,
      FSImageTestUtil.getStorageTxId(nn0, editsUri));

  // We should now be able to start the standby successfully.
  restartNameNodesFromIndex(1);
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 42; Source: TestBootstrapStandby.java

Example 11: testDTManagerInSafeMode

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
/**
 * Test that the delegation token secret manager only runs when the
 * NN is out of safe mode. This is because the secret manager
 * has to log to the edit log, which should not be written in
 * safe mode. Regression test for HDFS-2579.
 */
@Test
public void testDTManagerInSafeMode() throws Exception {
  cluster.startDataNodes(config, 1, true, StartupOption.REGULAR, null);
  FileSystem fs = cluster.getFileSystem();
  for (int i = 0; i < 5; i++) {
    DFSTestUtil.createFile(fs, new Path("/test-" + i), 100, (short)1, 1L);
  }
  cluster.getConfiguration(0).setInt(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 500); 
  cluster.getConfiguration(0).setInt(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000);
  cluster.setWaitSafeMode(false);
  cluster.restartNameNode();
  NameNode nn = cluster.getNameNode();
  assertTrue(nn.isInSafeMode());
  DelegationTokenSecretManager sm =
    NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
  assertFalse("Secret manager should not run in safe mode", sm.isRunning());
  
  NameNodeAdapter.leaveSafeMode(nn);
  assertTrue("Secret manager should start when safe mode is exited",
      sm.isRunning());
  
  LOG.info("========= entering safemode again");
  
  NameNodeAdapter.enterSafeMode(nn, false);
  assertFalse("Secret manager should stop again when safe mode " +
      "is manually entered", sm.isRunning());
  
  // Set the cluster to leave safemode quickly on its own.
  cluster.getConfiguration(0).setInt(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
  cluster.setWaitSafeMode(true);
  cluster.restartNameNode();
  nn = cluster.getNameNode();
  sm = NameNodeAdapter.getDtSecretManager(nn.getNamesystem());

  assertFalse(nn.isInSafeMode());
  assertTrue(sm.isRunning());
}
 
Developer: naver; Project: hadoop; Lines: 47; Source: TestDelegationToken.java

Example 12: testClientRetrySafeMode

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
/**
 * Make sure the client retries when the active NN is in safemode
 */
@Test (timeout=300000)
public void testClientRetrySafeMode() throws Exception {
  final Map<Path, Boolean> results = Collections
      .synchronizedMap(new HashMap<Path, Boolean>());
  final Path test = new Path("/test");
  // let nn0 enter safemode
  NameNodeAdapter.enterSafeMode(nn0, false);
  SafeModeInfo safeMode = (SafeModeInfo) Whitebox.getInternalState(
      nn0.getNamesystem(), "safeMode");
  Whitebox.setInternalState(safeMode, "extension", Integer.valueOf(30000));
  LOG.info("enter safemode");
  new Thread() {
    @Override
    public void run() {
      try {
        boolean mkdir = fs.mkdirs(test);
        LOG.info("mkdir finished, result is " + mkdir);
        synchronized (TestHASafeMode.this) {
          results.put(test, mkdir);
          TestHASafeMode.this.notifyAll();
        }
      } catch (Exception e) {
        LOG.info("Got Exception while calling mkdir", e);
      }
    }
  }.start();
  
  // make sure the client's call has actually been handled by the active NN
  assertFalse("The directory should not be created while NN in safemode",
      fs.exists(test));
  
  Thread.sleep(1000);
  // let nn0 leave safemode
  NameNodeAdapter.leaveSafeMode(nn0);
  LOG.info("leave safemode");
  
  synchronized (this) {
    while (!results.containsKey(test)) {
      this.wait();
    }
    assertTrue(results.get(test));
  }
}
 
Developer: naver; Project: hadoop; Lines: 47; Source: TestHASafeMode.java

Example 13: testClientRetrySafeMode

import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; // import the package/class the method depends on
/**
 * Make sure the client retries when the active NN is in safemode
 */
@Test (timeout=300000)
public void testClientRetrySafeMode() throws Exception {
  final Map<Path, Boolean> results = Collections
      .synchronizedMap(new HashMap<Path, Boolean>());
  final Path test = new Path("/test");
  // let nn0 enter safemode
  cluster.getConfiguration(0).setInt(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 3);
  NameNodeAdapter.enterSafeMode(nn0, false);
  Whitebox.setInternalState(nn0.getNamesystem(), "manualSafeMode", false);
  BlockManagerTestUtil.setStartupSafeModeForTest(nn0.getNamesystem()
      .getBlockManager());
  assertTrue(nn0.getNamesystem().isInStartupSafeMode());
  LOG.info("enter safemode");
  new Thread() {
    @Override
    public void run() {
      try {
        boolean mkdir = fs.mkdirs(test);
        LOG.info("mkdir finished, result is " + mkdir);
        synchronized (TestHASafeMode.this) {
          results.put(test, mkdir);
          TestHASafeMode.this.notifyAll();
        }
      } catch (Exception e) {
        LOG.info("Got Exception while calling mkdir", e);
      }
    }
  }.start();
  
  // make sure the client's call has actually been handled by the active NN
  assertFalse("The directory should not be created while NN in safemode",
      fs.exists(test));
  
  Thread.sleep(1000);
  // let nn0 leave safemode
  NameNodeAdapter.leaveSafeMode(nn0);
  LOG.info("leave safemode");
  
  synchronized (this) {
    while (!results.containsKey(test)) {
      this.wait();
    }
    assertTrue(results.get(test));
  }
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 50; Source: TestHASafeMode.java


Note: The org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.leaveSafeMode method examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult each project's license before distributing or reusing the code, and do not reproduce this article without permission.