This page collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.rollEditLog. If you are wondering what NamenodeProtocols.rollEditLog does, how to call it, or where to find examples of it, the curated code samples below should help. You may also want to explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols.
Eight code examples of the NamenodeProtocols.rollEditLog method are shown below, sorted by popularity by default.
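Before the full test listings, a minimal sketch of the call itself may help orient you. rollEditLog() asks the NameNode to finalize its current edit-log segment and open a new one, returning a CheckpointSignature that describes the resulting namespace state. The sketch below only assumes a MiniDFSCluster test setup like the examples that follow; the class name RollEditLogSketch is invented for illustration and is not part of any of them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

// Hypothetical demo class, not taken from the examples below.
public class RollEditLogSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
      // Bring up a single-NameNode mini cluster with no DataNodes,
      // as several of the tests below do.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();
      // The NameNode's RPC facade exposes rollEditLog().
      NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
      // Finalize the current edit-log segment and start a new one.
      CheckpointSignature sig = nnRpc.rollEditLog();
      System.out.println("Rolled edit log; signature: " + sig);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}

In a real deployment the same RPC is driven by a checkpointing node (SecondaryNameNode or BackupNode) at the start of a checkpoint, which is the pattern the tests below exercise.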
Example 1: testCheckpointSignature
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
@Test
public void testCheckpointSignature() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();
    NameNode nn = cluster.getNameNode();
    NamenodeProtocols nnRpc = nn.getRpcServer();
    secondary = startSecondaryNameNode(conf);
    // prepare checkpoint image
    secondary.doCheckpoint();
    CheckpointSignature sig = nnRpc.rollEditLog();
    // manipulate the CheckpointSignature fields
    sig.setBlockpoolID("somerandomebpid");
    sig.clusterID = "somerandomcid";
    try {
      sig.validateStorageInfo(nn.getFSImage()); // this should fail
      assertTrue("This test is expected to fail.", false);
    } catch (Exception ignored) {
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 2: testCheckpointSignature
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
@SuppressWarnings("deprecation")
@Test
public void testCheckpointSignature() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();
    NameNode nn = cluster.getNameNode();
    NamenodeProtocols nnRpc = nn.getRpcServer();
    secondary = startSecondaryNameNode(conf);
    // prepare checkpoint image
    secondary.doCheckpoint();
    CheckpointSignature sig = nnRpc.rollEditLog();
    // manipulate the CheckpointSignature fields
    sig.setBlockpoolID("somerandomebpid");
    sig.clusterID = "somerandomcid";
    try {
      sig.validateStorageInfo(nn.getFSImage()); // this should fail
      assertTrue("This test is expected to fail.", false);
    } catch (Exception ignored) {
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 3: testBackupNodeTailsEdits
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Ensure that the backupnode will tail edits from the NN
 * and keep in sync, even while the NN rolls, checkpoints
 * occur, etc.
 */
@Test
public void testBackupNodeTailsEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  HAUtil.setAllowStandbyReads(conf, true);
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  BackupNode backup = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0).build();
    fileSys = cluster.getFileSystem();
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);
    BackupImage bnImage = (BackupImage) backup.getFSImage();
    testBNInSync(cluster, backup, 1);
    // Force a roll -- BN should roll with NN.
    NameNode nn = cluster.getNameNode();
    NamenodeProtocols nnRpc = nn.getRpcServer();
    nnRpc.rollEditLog();
    assertEquals(bnImage.getEditLog().getCurSegmentTxId(),
        nn.getFSImage().getEditLog().getCurSegmentTxId());
    // BN should stay in sync after roll
    testBNInSync(cluster, backup, 2);
    long nnImageBefore =
        nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
    // BN checkpoint
    backup.doCheckpoint();
    // NN should have received a new image
    long nnImageAfter =
        nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
    assertTrue("nn should have received new checkpoint. before: " +
        nnImageBefore + " after: " + nnImageAfter,
        nnImageAfter > nnImageBefore);
    // BN should stay in sync after checkpoint
    testBNInSync(cluster, backup, 3);
    // Stop BN
    StorageDirectory sd = bnImage.getStorage().getStorageDir(0);
    backup.stop();
    backup = null;
    // When shutting down the BN, it shouldn't finalize logs that are
    // still open on the NN
    EditLogFile editsLog = FSImageTestUtil.findLatestEditsLog(sd);
    assertEquals(editsLog.getFirstTxId(),
        nn.getFSImage().getEditLog().getCurSegmentTxId());
    assertTrue("Should not have finalized " + editsLog,
        editsLog.isInProgress());
    // do some edits
    assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
    // start a new backup node
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);
    testBNInSync(cluster, backup, 4);
    assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down", false));
  } finally {
    LOG.info("Shutting down...");
    if (backup != null) backup.stop();
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
  assertStorageDirsMatch(cluster.getNameNode(), backup);
}
Example 4: testCheckpointWithFailedStorageDir
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    secondary = startSecondaryNameNode(conf);
    // Checkpoint once
    secondary.doCheckpoint();
    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);
    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);
    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();
    GenericTestUtils.assertExists(
        new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();
    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 5: testCheckpointWithSeparateDirsAfterNameFails
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  Configuration conf = new HdfsConfiguration();
  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();
    secondary = startSecondaryNameNode(conf);
    // Checkpoint once
    secondary.doCheckpoint();
    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));
    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }
    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();
    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 6: testEditLogRolling
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Tests rolling edit logs while transactions are ongoing.
 */
@Test
public void testEditLogRolling() throws Exception {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final NamenodeProtocols nn = cluster.getNameNode().getRpcServer();
    FSImage fsimage = cluster.getNamesystem().getFSImage();
    StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
    startTransactionWorkers(nn, caughtErr);
    long previousLogTxId = 1;
    for (int i = 0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
      try {
        Thread.sleep(20);
      } catch (InterruptedException e) {}
      LOG.info("Starting roll " + i + ".");
      CheckpointSignature sig = nn.rollEditLog();
      long nextLog = sig.curSegmentTxId;
      String logFileName = NNStorage.getFinalizedEditsFileName(
          previousLogTxId, nextLog - 1);
      previousLogTxId += verifyEditLogs(cluster.getNamesystem(), fsimage,
          logFileName, previousLogTxId);
      assertEquals(previousLogTxId, nextLog);
      File expectedLog = NNStorage.getInProgressEditsFile(sd, previousLogTxId);
      assertTrue("Expect " + expectedLog + " to exist", expectedLog.exists());
    }
  } finally {
    stopTransactionWorkers();
    if (caughtErr.get() != null) {
      throw new RuntimeException(caughtErr.get());
    }
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
}
Example 7: testCheckpointWithFailedStorageDir
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@SuppressWarnings("deprecation")
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    secondary = startSecondaryNameNode(conf);
    // Checkpoint once
    secondary.doCheckpoint();
    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);
    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);
    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();
    GenericTestUtils.assertExists(
        new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();
    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 8: testCheckpointWithSeparateDirsAfterNameFails
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; // import the package/class this method depends on
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@SuppressWarnings("deprecation")
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  Configuration conf = new HdfsConfiguration();
  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();
    secondary = startSecondaryNameNode(conf);
    // Checkpoint once
    secondary.doCheckpoint();
    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));
    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }
    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();
    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}