This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory.getCurrentDir. If you are unsure what StorageDirectory.getCurrentDir does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also read further about the enclosing class, org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory.
The 11 code examples below show how StorageDirectory.getCurrentDir is used in practice.
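Before the examples, here is a minimal, hedged sketch of the typical call pattern. It assumes an already-initialized NNStorage instance named storage (a hypothetical variable, not taken from the examples below); getCurrentDir() resolves the current/ subdirectory of a storage root as a java.io.File.
import java.io.File;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;

// Hedged sketch: walk all storage directories and print their current/ dirs.
// `storage` is assumed to be an initialized NNStorage instance.
static void printCurrentDirs(NNStorage storage) {
  for (StorageDirectory sd : storage.dirIterable(null)) {
    File curDir = sd.getCurrentDir();   // <storage root>/current
    System.out.println("current dir: " + curDir.getAbsolutePath());
  }
}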
Example 1: doPreUpgrade
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
/**
 * Perform any steps that must succeed across all storage dirs/JournalManagers
 * involved in an upgrade before proceeding onto the actual upgrade stage. If
 * a call to any JM's or local storage dir's doPreUpgrade method fails, then
 * doUpgrade will not be called for any JM. The existing current dir is
 * renamed to previous.tmp, and then a new, empty current dir is created.
 *
 * @param conf configuration for creating {@link EditLogFileOutputStream}
 * @param sd the storage directory to perform the pre-upgrade procedure.
 * @throws IOException in the event of error
 */
static void doPreUpgrade(Configuration conf, StorageDirectory sd)
    throws IOException {
  LOG.info("Starting upgrade of storage directory " + sd.getRoot());
  // rename current to tmp
  renameCurToTmp(sd);
  final File curDir = sd.getCurrentDir();
  final File tmpDir = sd.getPreviousTmp();
  List<String> fileNameList = IOUtils.listDirectory(tmpDir, new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return dir.equals(tmpDir)
          && name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
    }
  });
  for (String s : fileNameList) {
    File prevFile = new File(tmpDir, s);
    File newFile = new File(curDir, prevFile.getName());
    Files.createLink(newFile.toPath(), prevFile.toPath());
  }
}
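A hedged driver sketch (not from the original source): doPreUpgrade could be invoked for every EDITS storage directory roughly like this, assuming conf (a Configuration) and storage (an NNStorage) are already set up.
// Hypothetical driver: run the pre-upgrade step on each EDITS storage dir.
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
  doPreUpgrade(conf, sd);  // current/ -> previous.tmp, then a fresh empty current/
}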
Example 2: renameCurToTmp
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
/**
 * Rename the existing current dir to previous.tmp, and create a new empty
 * current dir.
 */
public static void renameCurToTmp(StorageDirectory sd) throws IOException {
  File curDir = sd.getCurrentDir();
  File prevDir = sd.getPreviousDir();
  final File tmpDir = sd.getPreviousTmp();
  Preconditions.checkState(curDir.exists(),
      "Current directory must exist for preupgrade.");
  Preconditions.checkState(!prevDir.exists(),
      "Previous directory must not exist for preupgrade.");
  Preconditions.checkState(!tmpDir.exists(),
      "Previous.tmp directory must not exist for preupgrade. "
      + "Consider restarting for recovery.");
  // rename current to tmp
  NNStorage.rename(curDir, tmpDir);
  if (!curDir.mkdir()) {
    throw new IOException("Cannot create directory " + curDir);
  }
}
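As a hedged illustration of the resulting layout (an assumption, not part of the example): after renameCurToTmp(sd) returns, previous.tmp holds the old contents and current/ exists but is empty.
// Hedged sketch of the post-condition; `sd` is assumed to be a valid StorageDirectory.
renameCurToTmp(sd);
assert sd.getPreviousTmp().isDirectory();            // old current/ renamed here
assert sd.getCurrentDir().isDirectory();             // freshly created
assert sd.getCurrentDir().listFiles().length == 0;   // and still empty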
Example 3: doRollBack
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
/**
 * Perform rollback of the storage dir to the previous state. The existing
 * current dir is removed, and the previous dir is renamed to current.
 *
 * @param sd the storage directory to roll back.
 * @throws IOException in the event of error
 */
static void doRollBack(StorageDirectory sd)
    throws IOException {
  File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) {
    return;
  }
  File tmpDir = sd.getRemovedTmp();
  Preconditions.checkState(!tmpDir.exists(),
      "removed.tmp directory must not exist for rollback. "
      + "Consider restarting for recovery.");
  // rename current to tmp
  File curDir = sd.getCurrentDir();
  Preconditions.checkState(curDir.exists(),
      "Current directory must exist for rollback.");
  NNStorage.rename(curDir, tmpDir);
  // rename previous to current
  NNStorage.rename(prevDir, curDir);
  // delete tmp dir
  NNStorage.deleteDir(tmpDir);
  LOG.info("Rollback of " + sd.getRoot() + " is complete.");
}
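A hedged usage sketch (assuming storage is an initialized NNStorage; dirIterator() appears in Example 4 below): roll back every storage directory in turn; directories without a previous/ dir are skipped by the early return above.
// Hedged sketch: apply the rollback to each storage directory.
for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
  doRollBack(it.next());
}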
Example 4: ensureCurrentDirExists
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
/**
 * Ensure that the current/ directory exists in all storage
 * directories
 */
void ensureCurrentDirExists() throws IOException {
  for (Iterator<StorageDirectory> it
      = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    File curDir = sd.getCurrentDir();
    if (!curDir.exists() && !curDir.mkdirs()) {
      throw new IOException("Could not create directory " + curDir);
    }
  }
}
Example 5: verifyEditLogs
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
private long verifyEditLogs(FSNamesystem namesystem, FSImage fsimage,
    String logFileName, long startTxId)
    throws IOException {
  long numEdits = -1;
  // Verify that we can read in all the transactions that we have written.
  // If there were any corruptions, it is likely that the reading in
  // of these transactions will throw an exception.
  for (StorageDirectory sd :
      fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
    File editFile = new File(sd.getCurrentDir(), logFileName);
    System.out.println("Verifying file: " + editFile);
    FSEditLogLoader loader = new FSEditLogLoader(namesystem, startTxId);
    long numEditsThisLog = loader.loadFSEdits(
        new EditLogFileInputStream(editFile), startTxId);
    System.out.println("Number of edits: " + numEditsThisLog);
    assertTrue(numEdits == -1 || numEditsThisLog == numEdits);
    numEdits = numEditsThisLog;
  }
  assertTrue(numEdits != -1);
  return numEdits;
}
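A hedged call sketch (an assumption, not from the original test: it presumes the helper NNStorage.getInProgressEditsFileName is available along with the namesystem/fsimage pair from the test setup).
// Hedged sketch: verify the in-progress edit log segment that starts at txid 1.
long numEdits = verifyEditLogs(namesystem, fsimage,
    NNStorage.getInProgressEditsFileName(1), 1);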
Example 6: assertExistsInStorageDirs
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
private void assertExistsInStorageDirs(MiniDFSCluster cluster,
    NameNodeDirType dirType,
    String filename) {
  NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
  for (StorageDirectory sd : storage.dirIterable(dirType)) {
    File f = new File(sd.getCurrentDir(), filename);
    assertTrue("Expect that " + f + " exists", f.exists());
  }
}
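A hedged usage sketch, reusing names that also appear in Example 9 below (NNStorage.getImageFileName, MD5FileUtils.MD5_SUFFIX); the cluster variable is assumed to be a running MiniDFSCluster.
// Hedged sketch: assert that the first checkpoint image and its MD5 side file
// exist in every IMAGE storage directory of the cluster.
assertExistsInStorageDirs(cluster, NameNodeDirType.IMAGE,
    NNStorage.getImageFileName(0));
assertExistsInStorageDirs(cluster, NameNodeDirType.IMAGE,
    NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);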
Example 7: findLatestEditsLog
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
/**
 * @return the latest edits log, finalized or otherwise, from the given
 * storage directory.
 */
public static EditLogFile findLatestEditsLog(StorageDirectory sd)
    throws IOException {
  File currentDir = sd.getCurrentDir();
  List<EditLogFile> foundEditLogs
      = Lists.newArrayList(FileJournalManager.matchEditLogs(currentDir));
  return Collections.max(foundEditLogs, EditLogFile.COMPARE_BY_START_TXID);
}
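A hedged usage sketch (assuming FileJournalManager.EditLogFile exposes getFirstTxId(), the same start txid the comparator above sorts by).
// Hedged sketch: report where the newest edit log segment in this dir begins.
EditLogFile latest = findLatestEditsLog(sd);
System.out.println("Latest edits segment starts at txid " + latest.getFirstTxId());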
Example 8: logStorageContents
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
public static void logStorageContents(Log LOG, NNStorage storage) {
  LOG.info("current storages and corresponding sizes:");
  for (StorageDirectory sd : storage.dirIterable(null)) {
    File curDir = sd.getCurrentDir();
    LOG.info("In directory " + curDir);
    File[] files = curDir.listFiles();
    Arrays.sort(files);
    for (File f : files) {
      LOG.info(" file " + f.getAbsolutePath() + "; len = " + f.length());
    }
  }
}
Example 9: testCancelSaveNamespace
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
@Test(timeout=20000)
public void testCancelSaveNamespace() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
  // Replace the FSImage with a spy
  final FSImage image = fsn.getFSImage();
  NNStorage storage = image.getStorage();
  storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
  storage.setStorageDirectories(
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));
  FSNamesystem spyFsn = spy(fsn);
  final FSNamesystem finalFsn = spyFsn;
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
  BlockIdManager bid = spy(spyFsn.getBlockIdManager());
  Whitebox.setInternalState(finalFsn, "blockIdManager", bid);
  doAnswer(delayer).when(bid).getGenerationStampV2();
  ExecutorService pool = Executors.newFixedThreadPool(2);
  try {
    doAnEdit(fsn, 1);
    final Canceler canceler = new Canceler();
    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      Future<Void> saverFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          image.saveNamespace(finalFsn, NameNodeFile.IMAGE, canceler);
          return null;
        }
      });
      // Wait until saveNamespace calls getGenerationStamp
      delayer.waitForCall();
      // then cancel the saveNamespace
      Future<Void> cancelFuture = pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          canceler.cancel("cancelled");
          return null;
        }
      });
      // give the cancel call time to run
      Thread.sleep(500);
      // allow saveNamespace to proceed - it should check the cancel flag after
      // this point and throw an exception
      delayer.proceed();
      cancelFuture.get();
      saverFuture.get();
      fail("saveNamespace did not fail even though cancelled!");
    } catch (Throwable t) {
      GenericTestUtils.assertExceptionContains(
          "SaveNamespaceCancelledException", t);
    }
    LOG.info("Successfully cancelled a saveNamespace");
    // Check that we have only the original image and not any
    // cruft left over from half-finished images
    FSImageTestUtil.logStorageContents(LOG, storage);
    for (StorageDirectory sd : storage.dirIterable(null)) {
      File curDir = sd.getCurrentDir();
      GenericTestUtils.assertGlobEquals(curDir, "fsimage_.*",
          NNStorage.getImageFileName(0),
          NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);
    }
  } finally {
    fsn.close();
  }
}
Example 10: testCheckpointWithFailedStorageDir
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    secondary = startSecondaryNameNode(conf);
    // Checkpoint once
    secondary.doCheckpoint();
    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);
    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);
    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();
    GenericTestUtils.assertExists(
        new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();
    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 11: testCheckpointWithSeparateDirsAfterNameFails
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; // import the package/class this method depends on
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  Configuration conf = new HdfsConfiguration();
  File base_dir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(base_dir, "namesecondary1")).toString());
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
        .manageNameDfsDirs(false).build();
    secondary = startSecondaryNameNode(conf);
    // Checkpoint once
    secondary.doCheckpoint();
    // Now primary NN experiences failure of its only name dir -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
    currentDir = sd0.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));
    // Try to upload checkpoint -- this should fail since there are no
    // valid storage dirs
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }
    // Restore the good dir
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();
    // Checkpoint again -- this should upload to the restored name dir
    secondary.doCheckpoint();
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}