This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.common.Storage. If you are wondering what the Storage class is for, how to use it, or what real code that uses it looks like, the curated examples below should help.
The Storage class lives in the org.apache.hadoop.hdfs.server.common package. Fifteen code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site surface better Java code examples.
Example 1: downloadImageToStorage
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
public static MD5Hash downloadImageToStorage(URL fsName, long imageTxId,
    Storage dstStorage, boolean needDigest) throws IOException {
  String fileid = ImageServlet.getParamStringForImage(null,
      imageTxId, dstStorage);
  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);
  List<File> dstFiles = dstStorage.getFiles(
      NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }
  MD5Hash hash = getFileClient(fsName, fileid, dstFiles, dstStorage, needDigest);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size " +
      dstFiles.get(0).length() + " bytes.");
  return hash;
}
Example 2: handleUploadImageRequest
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
static MD5Hash handleUploadImageRequest(HttpServletRequest request,
    long imageTxId, Storage dstStorage, InputStream stream,
    long advertisedSize, DataTransferThrottler throttler) throws IOException {
  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);
  List<File> dstFiles = dstStorage.getFiles(NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }
  MD5Hash advertisedDigest = parseMD5Header(request);
  MD5Hash hash = receiveFile(fileName, dstFiles, dstStorage, true,
      advertisedSize, advertisedDigest, fileName, stream, throttler);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size "
      + dstFiles.get(0).length() + " bytes.");
  return hash;
}
Example 3: EditLogBackupOutputStream
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
EditLogBackupOutputStream(NamenodeRegistration bnReg, // backup node
    JournalInfo journalInfo) // active name-node
    throws IOException {
  super();
  this.bnRegistration = bnReg;
  this.journalInfo = journalInfo;
  InetSocketAddress bnAddress =
      NetUtils.createSocketAddr(bnRegistration.getAddress());
  try {
    this.backupNode = NameNodeProxies.createNonHAProxy(new HdfsConfiguration(),
        bnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(),
        true).getProxy();
  } catch (IOException e) {
    Storage.LOG.error("Error connecting to: " + bnAddress, e);
    throw e;
  }
  this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
  this.out = new DataOutputBuffer(DEFAULT_BUFFER_SIZE);
}
Example 4: readLogVersion
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
/**
 * Read the header of the fsedit log.
 * @param in fsedit stream
 * @return the edit log version number
 * @throws IOException if an error occurs
 */
@VisibleForTesting
static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
    throws IOException, LogHeaderCorruptException {
  int logVersion;
  try {
    logVersion = in.readInt();
  } catch (EOFException eofe) {
    throw new LogHeaderCorruptException(
        "Reached EOF when reading log header");
  }
  if (verifyLayoutVersion &&
      (logVersion < HdfsConstants.NAMENODE_LAYOUT_VERSION || // future version
       logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported
    throw new LogHeaderCorruptException(
        "Unexpected version of the file system log file: "
        + logVersion + ". Current version = "
        + HdfsConstants.NAMENODE_LAYOUT_VERSION + ".");
  }
  return logVersion;
}
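The check above boils down to reading a single big-endian int from the stream and range-checking it against the supported layout versions. The following standalone sketch illustrates that pattern with plain java.io; the version bounds are made-up placeholders, not the HDFS layout-version constants.
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

public class LogHeaderSketch {
  // Placeholder bounds; the real code compares against HDFS layout-version constants.
  static final int NEWEST_SUPPORTED = -63;
  static final int OLDEST_SUPPORTED = -16;

  static int readVersion(DataInputStream in) throws IOException {
    final int version;
    try {
      version = in.readInt(); // the header is a single big-endian int
    } catch (EOFException e) {
      throw new IOException("Reached EOF when reading log header", e);
    }
    if (version < NEWEST_SUPPORTED || version > OLDEST_SUPPORTED) {
      throw new IOException("Unexpected log version: " + version);
    }
    return version;
  }

  public static void main(String[] args) throws IOException {
    byte[] header = {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xC1}; // -63
    System.out.println(readVersion(new DataInputStream(
        new ByteArrayInputStream(header))));
  }
}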
Example 5: doUpgrade
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
/**
 * Perform the upgrade of the storage dir to the given storage info. The new
 * storage info is written into the current directory, and the previous.tmp
 * directory is renamed to previous.
 *
 * @param sd the storage directory to upgrade
 * @param storage info about the new, upgraded version
 * @throws IOException in the event of error
 */
public static void doUpgrade(StorageDirectory sd, Storage storage)
    throws IOException {
  LOG.info("Performing upgrade of storage directory " + sd.getRoot());
  try {
    // Write the version file, since saveFsImage only makes the
    // fsimage_<txid>, and the directory is otherwise empty.
    storage.writeProperties(sd);
    File prevDir = sd.getPreviousDir();
    File tmpDir = sd.getPreviousTmp();
    Preconditions.checkState(!prevDir.exists(),
        "previous directory must not exist for upgrade.");
    Preconditions.checkState(tmpDir.exists(),
        "previous.tmp directory must exist for upgrade.");
    // rename tmp to previous
    NNStorage.rename(tmpDir, prevDir);
  } catch (IOException ioe) {
    LOG.error("Unable to rename temp to previous for " + sd.getRoot(), ioe);
    throw ioe;
  }
}
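The upgrade layout convention described above (write the new properties into current/, then promote previous.tmp to previous/ only after the preconditions hold) can be illustrated without any HDFS classes. The sketch below uses java.nio.file and a made-up directory layout; it is not NNStorage.rename, just the same precondition-then-rename pattern.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class UpgradeRenameSketch {
  // Promote previous.tmp to previous, mirroring the precondition checks above.
  static void promotePreviousTmp(Path storageRoot) throws IOException {
    Path prevDir = storageRoot.resolve("previous");
    Path tmpDir = storageRoot.resolve("previous.tmp");
    if (Files.exists(prevDir)) {
      throw new IllegalStateException("previous directory must not exist for upgrade");
    }
    if (!Files.isDirectory(tmpDir)) {
      throw new IllegalStateException("previous.tmp directory must exist for upgrade");
    }
    // Rename tmp to previous; ATOMIC_MOVE keeps the promotion all-or-nothing.
    Files.move(tmpDir, prevDir, StandardCopyOption.ATOMIC_MOVE);
  }

  public static void main(String[] args) throws IOException {
    Path root = Files.createTempDirectory("storage-dir");
    Files.createDirectories(root.resolve("previous.tmp"));
    promotePreviousTmp(root);
    System.out.println("previous exists: " + Files.isDirectory(root.resolve("previous")));
  }
}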
Example 6: getInitialVolumeFailureInfos
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup. The method works by determining the set difference
 * between all configured storage locations and the actual storage locations
 * in use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
      dataLocations.size());
  for (StorageLocation sl: dataLocations) {
    failedLocationSet.add(sl.getFile().getAbsolutePath());
  }
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getRoot().getAbsolutePath());
  }
  List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
      failedLocationSet.size());
  long failureDate = Time.now();
  for (String failedStorageLocation: failedLocationSet) {
    volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
        failureDate));
  }
  return volumeFailureInfos;
}
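The bookkeeping above is a plain set difference: start from every configured location, remove every location that actually came into service, and whatever is left failed at startup. The sketch below shows that idea with only java.util collections and made-up path strings; it is not the DataNode API.
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class VolumeFailureSketch {
  // Configured minus in-service = failed at startup.
  static List<String> failedAtStartup(List<String> configured, List<String> inService) {
    Set<String> failed = new HashSet<>(configured);
    failed.removeAll(inService);
    return new ArrayList<>(failed);
  }

  public static void main(String[] args) {
    List<String> configured = List.of("/data/1", "/data/2", "/data/3");
    List<String> inService = List.of("/data/1", "/data/3");
    System.out.println(failedAtStartup(configured, inService)); // [/data/2]
  }
}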
Example 7: checkResultBlockPool
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
/**
 * For a block pool, verify that the current and/or previous directories exist
 * as indicated by the method parameters. If previous exists, verify that
 * it hasn't been modified by comparing the checksums of all of its
 * files with their original checksums. It is assumed that the server
 * has recovered.
 * @param baseDirs directories pointing to block pool storage
 * @param currentShouldExist current directory exists under storage
 * @param previousShouldExist previous directory exists under storage
 */
void checkResultBlockPool(String[] baseDirs, boolean currentShouldExist,
    boolean previousShouldExist) throws IOException {
  if (currentShouldExist) {
    for (int i = 0; i < baseDirs.length; i++) {
      File bpCurDir = new File(baseDirs[i], Storage.STORAGE_DIR_CURRENT);
      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir,
          false), UpgradeUtilities.checksumMasterBlockPoolContents());
    }
  }
  if (previousShouldExist) {
    for (int i = 0; i < baseDirs.length; i++) {
      File bpPrevDir = new File(baseDirs[i], Storage.STORAGE_DIR_PREVIOUS);
      assertTrue(bpPrevDir.isDirectory());
      assertEquals(
          UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir,
              false), UpgradeUtilities.checksumMasterBlockPoolContents());
    }
  }
}
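The test proves that previous/ was left untouched by comparing a checksum of its contents against a reference value. A minimal, self-contained version of that idea, using java.util.zip.CRC32 over all regular files in sorted order (the real UpgradeUtilities helper is more involved), might look like this:
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;
import java.util.zip.CRC32;

public class DirChecksumSketch {
  // CRC32 over every regular file under dir, visited in sorted order.
  static long checksumContents(Path dir) throws IOException {
    CRC32 crc = new CRC32();
    try (Stream<Path> files = Files.walk(dir)) {
      for (Path p : files.filter(Files::isRegularFile).sorted().toList()) {
        crc.update(Files.readAllBytes(p));
      }
    }
    return crc.getValue();
  }

  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("bp-current");
    Files.writeString(dir.resolve("VERSION"), "layoutVersion=-63\n");
    System.out.println(Long.toHexString(checksumContents(dir)));
  }
}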
Example 8: testStartingWithUpgradeInProgressSucceeds
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
/**
 * Make sure that an HA NN will start if a previous upgrade was in progress.
 */
@Test
public void testStartingWithUpgradeInProgressSucceeds() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();
    // Simulate an upgrade having started.
    for (int i = 0; i < 2; i++) {
      for (URI uri : cluster.getNameDirs(i)) {
        File prevTmp = new File(new File(uri), Storage.STORAGE_TMP_PREVIOUS);
        LOG.info("creating previous tmp dir: " + prevTmp);
        assertTrue(prevTmp.mkdirs());
      }
    }
    cluster.restartNameNodes();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 9: corruptFSImageMD5
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
/**
 * Corrupts the MD5 sum of the fsimage.
 *
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>)FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri: nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));
    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
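The corruption works because HDFS keeps the image digest in a small sidecar file next to the fsimage. The sketch below mimics that with plain Java: it writes an all-zero digest in an md5sum-style line next to a given file. The ".md5" sidecar name and line format here are assumptions for illustration, not a guaranteed match for what MD5FileUtils writes.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class CorruptMd5Sketch {
  // Overwrite the file's ".md5" sidecar with an all-zero digest (assumed md5sum-style line).
  static void zeroOutMd5(Path file) throws IOException {
    Path md5File = file.resolveSibling(file.getFileName() + ".md5"); // assumed naming
    String zeroDigest = "0".repeat(32); // 16 zero bytes rendered as hex
    Files.writeString(md5File, zeroDigest + " *" + file.getFileName() + "\n");
  }

  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("name-current");
    Path image = Files.writeString(dir.resolve("fsimage_0000000000000000000"), "dummy");
    zeroOutMd5(image);
    System.out.print(Files.readString(dir.resolve(image.getFileName() + ".md5")));
  }
}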
Example 10: createStorageDirs
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
private static void createStorageDirs(DataStorage storage, Configuration conf,
    int numDirs) throws IOException {
  List<Storage.StorageDirectory> dirs =
      new ArrayList<Storage.StorageDirectory>();
  List<String> dirStrings = new ArrayList<String>();
  for (int i = 0; i < numDirs; i++) {
    File loc = new File(BASE_DIR + "/data" + i);
    dirStrings.add(new Path(loc.toString()).toUri().toString());
    loc.mkdirs();
    dirs.add(createStorageDirectory(loc));
    when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
  }
  String dataDir = StringUtils.join(",", dirStrings);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
  when(storage.dirIterator()).thenReturn(dirs.iterator());
  when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
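One subtlety in the stubbing above: thenReturn(dirs.iterator()) hands out a single iterator object, so only the first call to dirIterator() gets a fresh iterator and later callers see it already exhausted. If a mock needs to be iterated more than once, Mockito's thenAnswer can build a new iterator per call, as in this standalone sketch (the Dirs interface is a made-up stand-in for the mocked storage type):
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Iterator;
import java.util.List;

public class FreshIteratorStubSketch {
  interface Dirs { // stand-in for the mocked storage type
    Iterator<String> dirIterator();
  }

  public static void main(String[] args) {
    List<String> dirs = List.of("/data0", "/data1");
    Dirs storage = mock(Dirs.class);
    // A fresh iterator per call, so the mock can be iterated repeatedly.
    when(storage.dirIterator()).thenAnswer(inv -> dirs.iterator());
    storage.dirIterator().forEachRemaining(System.out::println);
    storage.dirIterator().forEachRemaining(System.out::println); // still works
  }
}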
Example 11: getTrashDirectoryForBlockFile
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
/**
 * Test conversion from a block file path to its target trash
 * directory.
 */
public void getTrashDirectoryForBlockFile(String fileName, int nestingLevel) {
  final String blockFileSubdir = makeRandomBlockFileSubdir(nestingLevel);
  final String blockFileName = fileName;
  String testFilePath =
      storage.getSingularStorageDir().getRoot() + File.separator +
          Storage.STORAGE_DIR_CURRENT +
          blockFileSubdir + blockFileName;
  String expectedTrashPath =
      storage.getSingularStorageDir().getRoot() + File.separator +
          BlockPoolSliceStorage.TRASH_ROOT_DIR +
          blockFileSubdir.substring(0, blockFileSubdir.length() - 1);
  LOG.info("Got subdir " + blockFileSubdir);
  LOG.info("Generated file path " + testFilePath);
  assertThat(storage.getTrashDirectory(new File(testFilePath)), is(expectedTrashPath));
}
Example 12: getRestoreDirectoryForBlockFile
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
public void getRestoreDirectoryForBlockFile(String fileName, int nestingLevel) {
  BlockPoolSliceStorage storage = makeBlockPoolStorage();
  final String blockFileSubdir = makeRandomBlockFileSubdir(nestingLevel);
  final String blockFileName = fileName;
  String deletedFilePath =
      storage.getSingularStorageDir().getRoot() + File.separator +
          BlockPoolSliceStorage.TRASH_ROOT_DIR +
          blockFileSubdir + blockFileName;
  String expectedRestorePath =
      storage.getSingularStorageDir().getRoot() + File.separator +
          Storage.STORAGE_DIR_CURRENT +
          blockFileSubdir.substring(0, blockFileSubdir.length() - 1);
  LOG.info("Generated deleted file path " + deletedFilePath);
  assertThat(storage.getRestoreDirectory(new File(deletedFilePath)),
      is(expectedRestorePath));
}
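Examples 11 and 12 exercise inverse mappings: one maps a live block file path under current/ to its trash directory, the other maps a trashed file back to the directory it should be restored into, keeping the subdirectory nesting intact. The sketch below captures that two-way mapping with plain string handling; the "current" and "trash" segment names are illustrative constants, not the actual HDFS ones.
public class TrashPathSketch {
  // Illustrative segment names; the real constants live in Storage and BlockPoolSliceStorage.
  static final String CURRENT = "/current/";
  static final String TRASH = "/trash/";

  // Directory under trash/ that mirrors the block file's subdir nesting.
  static String trashDirectoryFor(String blockFilePath) {
    String parent = blockFilePath.substring(0, blockFilePath.lastIndexOf('/'));
    return parent.replace(CURRENT, TRASH);
  }

  // Directory under current/ that a trashed block file should be restored into.
  static String restoreDirectoryFor(String trashFilePath) {
    String parent = trashFilePath.substring(0, trashFilePath.lastIndexOf('/'));
    return parent.replace(TRASH, CURRENT);
  }

  public static void main(String[] args) {
    String live = "/data/bp-1/current/subdir0/subdir3/blk_12345";
    System.out.println(trashDirectoryFor(live));    // /data/bp-1/trash/subdir0/subdir3
    System.out.println(restoreDirectoryFor(
        "/data/bp-1/trash/subdir0/subdir3/blk_12345")); // /data/bp-1/current/subdir0/subdir3
  }
}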
Example 13: createBlockPoolStorageDirs
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
/**
 * Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
 * populated DFS filesystem.
 * For each parent directory, this method populates <code>parent/dirName</code>
 * with the contents of a block pool storage directory that comes from a
 * singleton datanode master (containing version and block files). If the
 * destination directory does not exist, it will be created. If the directory
 * already exists, it will first be deleted.
 *
 * @param parents parent directory where {@code dirName} is created
 * @param dirName directory under which the storage directory is created
 * @param bpid block pool id for which the storage directory is created
 * @return the array of created directories
 */
public static File[] createBlockPoolStorageDirs(String[] parents,
    String dirName, String bpid) throws Exception {
  File[] retVal = new File[parents.length];
  Path bpCurDir = new Path(MiniDFSCluster.getBPDir(datanodeStorage,
      bpid, Storage.STORAGE_DIR_CURRENT));
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i] + "/current/" + bpid, dirName);
    createEmptyDirs(new String[] {newDir.toString()});
    LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
    localFS.copyToLocalFile(bpCurDir,
        new Path(newDir.toString()),
        false);
    retVal[i] = newDir;
  }
  return retVal;
}
Example 14: downloadImageToStorage
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
public static MD5Hash downloadImageToStorage(URL fsName, long imageTxId,
    Storage dstStorage, boolean needDigest, boolean isBootstrapStandby)
    throws IOException {
  String fileid = ImageServlet.getParamStringForImage(null,
      imageTxId, dstStorage, isBootstrapStandby);
  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);
  List<File> dstFiles = dstStorage.getFiles(
      NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }
  MD5Hash hash = getFileClient(fsName, fileid, dstFiles, dstStorage, needDigest);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size " +
      dstFiles.get(0).length() + " bytes.");
  return hash;
}
Example 15: readLogVersion
import org.apache.hadoop.hdfs.server.common.Storage; // import the required package/class
/**
 * Read the header of the fsedit log.
 * @param in fsedit stream
 * @return the edit log version number
 * @throws IOException if an error occurs
 */
@VisibleForTesting
static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
    throws IOException, LogHeaderCorruptException {
  int logVersion;
  try {
    logVersion = in.readInt();
  } catch (EOFException eofe) {
    throw new LogHeaderCorruptException(
        "Reached EOF when reading log header");
  }
  if (verifyLayoutVersion &&
      (logVersion < HdfsServerConstants.NAMENODE_LAYOUT_VERSION || // future version
       logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported
    throw new LogHeaderCorruptException(
        "Unexpected version of the file system log file: "
        + logVersion + ". Current version = "
        + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ".");
  }
  return logVersion;
}