本文整理汇总了Java中org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory类的典型用法代码示例。如果您正苦于以下问题:Java StorageDirectory类的具体用法?Java StorageDirectory怎么用?Java StorageDirectory使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
StorageDirectory类属于org.apache.hadoop.hdfs.server.common.Storage包,在下文中一共展示了StorageDirectory类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: finalizeUpgrade
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Finalize the upgrade for all local storage directories and, optionally,
 * for the shared edit log.
 *
 * @param finalizeEditLog whether to also finalize the shared edit log
 *        (HA active NN case)
 * @throws IOException if finalizing any directory fails
 */
void finalizeUpgrade(boolean finalizeEditLog) throws IOException {
  final String layoutDetail = storage.getLayoutVersion() == 0 ? "" :
      "\n cur LV = " + storage.getLayoutVersion()
      + "; cur CTime = " + storage.getCTime();
  LOG.info("Finalizing upgrade for local dirs. " + layoutDetail);
  Iterator<StorageDirectory> it = storage.dirIterator(false);
  while (it.hasNext()) {
    NNUpgradeUtil.doFinalize(it.next());
  }
  if (finalizeEditLog) {
    // We only do this in the case that HA is enabled and we're active. In any
    // other case the NN will have done the upgrade of the edits directories
    // already by virtue of the fact that they're local.
    editLog.doFinalizeOfSharedLog();
  }
  isUpgradeFinalized = true;
}
示例2: renameCheckpoint
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Rename the FSImage file with the given txid in every IMAGE storage
 * directory, collecting and reporting any directories where the rename
 * failed.
 *
 * @param txid transaction id identifying the checkpoint file
 * @param fromNnf the current NameNodeFile type
 * @param toNnf the NameNodeFile type to rename to
 * @param renameMD5 whether to rename the companion MD5 file as well
 * @throws IOException if reporting failed directories raises an error
 */
private void renameCheckpoint(long txid, NameNodeFile fromNnf,
    NameNodeFile toNnf, boolean renameMD5) throws IOException {
  ArrayList<StorageDirectory> badDirs = null;
  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
    try {
      renameImageFileInDir(sd, fromNnf, toNnf, txid, renameMD5);
    } catch (IOException ioe) {
      // Keep going: a single bad directory must not abort the others.
      LOG.warn("Unable to rename checkpoint in " + sd, ioe);
      if (badDirs == null) {
        badDirs = Lists.newArrayList();
      }
      badDirs.add(sd);
    }
  }
  if (badDirs != null) {
    storage.reportErrorsOnDirectories(badDirs);
  }
}
示例3: renameImageFileInDir
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Rename one image file within a single storage directory, retrying after
 * deleting the destination if the first rename fails.
 *
 * @param sd the storage directory containing the image
 * @param fromNnf the current NameNodeFile type
 * @param toNnf the NameNodeFile type to rename to
 * @param txid transaction id identifying the image file
 * @param renameMD5 whether to rename the companion MD5 file as well
 * @throws IOException if the rename ultimately fails
 */
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
    NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
  final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
  final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
  if(LOG.isDebugEnabled()) {
    LOG.debug("renaming " + fromFile.getAbsolutePath()
        + " to " + toFile.getAbsolutePath());
  }
  // renameTo fails on Windows if the destination file already exists.
  boolean renamed = fromFile.renameTo(toFile);
  if (!renamed) {
    // Delete the destination and retry once; if the delete itself fails we
    // do not retry, matching the short-circuit of the original check.
    renamed = toFile.delete() && fromFile.renameTo(toFile);
  }
  if (!renamed) {
    throw new IOException("renaming " + fromFile.getAbsolutePath() + " to " +
        toFile.getAbsolutePath() + " FAILED");
  }
  if (renameMD5) {
    MD5FileUtils.renameMD5File(fromFile, toFile);
  }
}
示例4: readCheckpointTime
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Determine the checkpoint time of the specified StorageDirectory
 *
 * @param sd StorageDirectory to check
 * @return If file exists and can be read, last checkpoint time. If not, 0L.
 * @throws IOException On errors processing file pointed to by sd
 */
static long readCheckpointTime(StorageDirectory sd) throws IOException {
  File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
  long timeStamp = 0L;
  if (timeFile.exists() && FileUtil.canRead(timeFile)) {
    // try-with-resources replaces the manual close/null/IOUtils.cleanup
    // pattern; the original also propagated close() failures (close was
    // called inside the try), so propagation behavior is unchanged.
    try (DataInputStream in =
        new DataInputStream(new FileInputStream(timeFile))) {
      timeStamp = in.readLong();
    }
  }
  return timeStamp;
}
示例5: canRollBack
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Return true if this storage dir can roll back to the previous storage
 * state, false otherwise. The NN will refuse to run the rollback operation
 * unless at least one JM or fsimage storage directory can roll back.
 *
 * @param sd the storage directory to check
 * @param storage the storage info for the current state
 * @param prevStorage the storage info for the previous (unupgraded) state
 * @param targetLayoutVersion the layout version we intend to roll back to
 * @return true if this JM can roll back, false otherwise.
 * @throws IOException in the event of error
 */
static boolean canRollBack(StorageDirectory sd, StorageInfo storage,
    StorageInfo prevStorage, int targetLayoutVersion) throws IOException {
  final File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) {
    // No previous state here; still read the current directory so its
    // consistency with the other directories gets verified.
    LOG.info("Storage directory " + sd.getRoot()
        + " does not contain previous fs state.");
    storage.readProperties(sd);
    return false;
  }
  // Read and verify consistency of the prev dir.
  prevStorage.readPreviousVersionProperties(sd);
  if (prevStorage.getLayoutVersion() == targetLayoutVersion) {
    return true;
  }
  throw new IOException(
      "Cannot rollback to storage version " +
      prevStorage.getLayoutVersion() +
      " using this version of the NameNode, which uses storage version " +
      targetLayoutVersion + ". " +
      "Please use the previous version of HDFS to perform the rollback.");
}
示例6: doFinalize
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Finalize the upgrade. The previous dir, if any, will be renamed and
 * removed. After this is completed, rollback is no longer allowed.
 *
 * @param sd the storage directory to finalize
 * @throws IOException in the event of error
 */
static void doFinalize(StorageDirectory sd) throws IOException {
  final File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) {
    // Already discarded; there is nothing left to finalize.
    LOG.info("Directory " + prevDir + " does not exist.");
    LOG.info("Finalize upgrade for " + sd.getRoot()+ " is not required.");
    return;
  }
  LOG.info("Finalizing upgrade of storage directory " + sd.getRoot());
  Preconditions.checkState(sd.getCurrentDir().exists(),
      "Current directory must exist.");
  // Rename previous to a tmp name, then remove it.
  final File tmpDir = sd.getFinalizedTmp();
  NNStorage.rename(prevDir, tmpDir);
  NNStorage.deleteDir(tmpDir);
  LOG.info("Finalize upgrade for " + sd.getRoot()+ " is complete.");
}
示例7: doPreUpgrade
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Perform any steps that must succeed across all storage dirs/JournalManagers
 * involved in an upgrade before proceeding onto the actual upgrade stage. If
 * a call to any JM's or local storage dir's doPreUpgrade method fails, then
 * doUpgrade will not be called for any JM. The existing current dir is
 * renamed to previous.tmp, and then a new, empty current dir is created.
 *
 * @param conf configuration for creating {@link EditLogFileOutputStream}
 * @param sd the storage directory to perform the pre-upgrade procedure.
 * @throws IOException in the event of error
 */
static void doPreUpgrade(Configuration conf, StorageDirectory sd)
    throws IOException {
  LOG.info("Starting upgrade of storage directory " + sd.getRoot());
  // Move current aside to previous.tmp and recreate an empty current dir.
  renameCurToTmp(sd);
  final File curDir = sd.getCurrentDir();
  final File tmpDir = sd.getPreviousTmp();
  // Select the edit-log files that now live under previous.tmp.
  FilenameFilter editsFilter = new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return dir.equals(tmpDir)
          && name.startsWith(NNStorage.NameNodeFile.EDITS.getName());
    }
  };
  // Hard-link each edits file back into the fresh current dir.
  for (String name : IOUtils.listDirectory(tmpDir, editsFilter)) {
    File prevFile = new File(tmpDir, name);
    File newFile = new File(curDir, prevFile.getName());
    Files.createLink(newFile.toPath(), prevFile.toPath());
  }
}
示例8: renameCurToTmp
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Rename the existing current dir to previous.tmp, and create a new empty
 * current dir.
 *
 * @param sd the storage directory whose current dir is moved aside
 * @throws IOException if the new current dir cannot be created
 */
public static void renameCurToTmp(StorageDirectory sd) throws IOException {
  final File curDir = sd.getCurrentDir();
  final File prevDir = sd.getPreviousDir();
  final File tmpDir = sd.getPreviousTmp();
  // Explicit state checks; each throws IllegalStateException with the same
  // message Preconditions.checkState would have produced.
  if (!curDir.exists()) {
    throw new IllegalStateException(
        "Current directory must exist for preupgrade.");
  }
  if (prevDir.exists()) {
    throw new IllegalStateException(
        "Previous directory must not exist for preupgrade.");
  }
  if (tmpDir.exists()) {
    throw new IllegalStateException(
        "Previous.tmp directory must not exist for preupgrade."
        + "Consider restarting for recovery.");
  }
  // Move current aside, then recreate it empty.
  NNStorage.rename(curDir, tmpDir);
  if (!curDir.mkdir()) {
    throw new IOException("Cannot create directory " + curDir);
  }
}
示例9: doUpgrade
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Perform the upgrade of the storage dir to the given storage info. The new
 * storage info is written into the current directory, and the previous.tmp
 * directory is renamed to previous.
 *
 * @param sd the storage directory to upgrade
 * @param storage info about the new upgraded versions.
 * @throws IOException in the event of error
 */
public static void doUpgrade(StorageDirectory sd, Storage storage)
    throws IOException {
  LOG.info("Performing upgrade of storage directory " + sd.getRoot());
  try {
    // Write the version file, since saveFsImage only makes the
    // fsimage_<txid>, and the directory is otherwise empty.
    storage.writeProperties(sd);

    final File prevDir = sd.getPreviousDir();
    final File tmpDir = sd.getPreviousTmp();
    Preconditions.checkState(!prevDir.exists(),
        "previous directory must not exist for upgrade.");
    Preconditions.checkState(tmpDir.exists(),
        "previous.tmp directory must exist for upgrade.");

    // Promote previous.tmp to previous.
    NNStorage.rename(tmpDir, prevDir);
  } catch (IOException ioe) {
    LOG.error("Unable to rename temp to previous for " + sd.getRoot(), ioe);
    throw ioe;
  }
}
示例10: doRollBack
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Perform rollback of the storage dir to the previous state. The existing
 * current dir is removed, and the previous dir is renamed to current.
 *
 * @param sd the storage directory to roll back.
 * @throws IOException in the event of error
 */
static void doRollBack(StorageDirectory sd)
    throws IOException {
  final File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) {
    // Nothing recorded to roll back to.
    return;
  }
  final File tmpDir = sd.getRemovedTmp();
  Preconditions.checkState(!tmpDir.exists(),
      "removed.tmp directory must not exist for rollback."
      + "Consider restarting for recovery.");
  final File curDir = sd.getCurrentDir();
  Preconditions.checkState(curDir.exists(),
      "Current directory must exist for rollback.");
  // Move current aside, restore previous as current, then discard the old
  // current via the tmp name.
  NNStorage.rename(curDir, tmpDir);
  NNStorage.rename(prevDir, curDir);
  NNStorage.deleteDir(tmpDir);
  LOG.info("Rollback of " + sd.getRoot() + " is complete.");
}
示例11: selectInputStreams
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Collect edit log input streams covering transactions from {@code fromTxId}
 * across every storage directory.
 *
 * @param streams collection the matching streams are added to
 * @param fromTxId first transaction id of interest
 * @param inProgressOk whether in-progress segments may be included
 */
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxId, boolean inProgressOk) {
  for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
    StorageDirectory sd = it.next();
    final List<EditLogFile> logFiles;
    try {
      logFiles = FileJournalManager.matchEditLogs(sd.getCurrentDir());
    } catch (IOException ioe) {
      // This interface method cannot throw IOException; surface as unchecked.
      throw new RuntimeException(ioe);
    }
    FileJournalManager.addStreamsToCollectionFromFiles(logFiles, streams,
        fromTxId, inProgressOk);
  }
}
示例12: checkImages
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Confirm that FSImage files in all StorageDirectory are the same,
 * and non-empty, and there are the expected number of them.
 * @param fsn - the FSNamesystem being checked.
 * @param numImageDirs - the configured number of StorageDirectory of type IMAGE.
 * @return - the md5 hash of the most recent FSImage files, which must all be the same.
 * @throws AssertionError if image files are empty or different,
 *         if less than two StorageDirectory are provided, or if the
 *         actual number of StorageDirectory is less than configured.
 */
public static String checkImages(
    FSNamesystem fsn, int numImageDirs)
    throws Exception {
  NNStorage nnStorage = fsn.getFSImage().getStorage();
  // Any failed StorageDirectory is removed from the storageDirs list.
  assertEquals("Some StorageDirectories failed Upgrade",
      numImageDirs, nnStorage.getNumStorageDirs(NameNodeDirType.IMAGE));
  assertTrue("Not enough fsimage copies in MiniDFSCluster " +
      "to test parallel write", numImageDirs > 1);

  // "current/" directory from each storage dir.
  List<File> currentDirs =
      FSImageTestUtil.getCurrentDirs(nnStorage, NameNodeDirType.IMAGE);
  // Across directories, all files with same names should be identical hashes.
  FSImageTestUtil.assertParallelFilesAreIdentical(
      currentDirs, Collections.<String>emptySet());
  FSImageTestUtil.assertSameNewestImage(currentDirs);

  // Return the hash of the newest image file.
  StorageDirectory firstDir =
      nnStorage.dirIterator(NameNodeDirType.IMAGE).next();
  File newestImage = FSImageTestUtil.findLatestImageFile(firstDir);
  String imageMd5 = FSImageTestUtil.getImageFileMD5IgnoringTxId(newestImage);
  System.err.println("md5 of " + newestImage + ": " + imageMd5);
  return imageMd5;
}
示例13: mockStorageForDirs
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Build a mock NNStorage whose inspectStorageDirs hands each of the given
 * directories to the supplied inspector.
 *
 * @param mockDirs the storage directories the mock should expose
 * @return the configured NNStorage mock
 * @throws IOException declared to match the stubbed signature
 */
private static NNStorage mockStorageForDirs(final StorageDirectory ... mockDirs)
    throws IOException {
  NNStorage mockStorage = Mockito.mock(NNStorage.class);
  Answer<Void> inspectEachDir = new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      FSImageStorageInspector inspector =
          (FSImageStorageInspector) invocation.getArguments()[0];
      for (StorageDirectory dir : mockDirs) {
        inspector.inspectDirectory(dir);
      }
      return null;
    }
  };
  Mockito.doAnswer(inspectEachDir).when(mockStorage).inspectStorageDirs(
      Mockito.<FSImageStorageInspector>anyObject());
  return mockStorage;
}
示例14: createEmptyInProgressEditLog
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Create an empty in-progress edit log file in the shared edits dir, one
 * past the last written txid, optionally writing a valid header into it.
 *
 * @param cluster the MiniDFSCluster providing the shared edits dir
 * @param nn the NameNode whose edit log determines the next txid
 * @param writeHeader whether to write the layout-version header
 * @throws IOException if the file cannot be created or written
 */
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());

  if (writeHeader) {
    // try-with-resources: the original only closed the stream on the success
    // path, leaking it if writeHeader threw.
    try (DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile))) {
      EditLogFileOutputStream.writeHeader(
          NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    }
  }
}
示例15: testCurrentStorageInspector
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; //导入依赖的package包/类
/**
 * Simple test with image, edits, and inprogress edits
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();
  // A mocked dir holding two images (txids 123 and 456), one finalized
  // edits segment and one in-progress segment.
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      "/foo/current/" + getImageFileName(123),
      "/foo/current/" + getFinalizedEditsFileName(123, 456),
      "/foo/current/" + getImageFileName(456),
      "/foo/current/" + getInProgressEditsFileName(457));
  inspector.inspectDirectory(mockDir);

  // Both images must be discovered, with txid 456 as the newest.
  assertEquals(2, inspector.foundImages.size());
  FSImageFile newestImage = inspector.getLatestImages().get(0);
  assertEquals(456, newestImage.txId);
  assertSame(mockDir, newestImage.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File("/foo/current/"+getImageFileName(456)),
      newestImage.getFile());
}