This article collects typical usage examples of the Java class org.apache.hadoop.io.MD5Hash. If you have been wrestling with questions like: what exactly is the Java MD5Hash class for? How is MD5Hash used? Where can I find MD5Hash examples? Then the curated class code examples here may help.
The MD5Hash class belongs to the org.apache.hadoop.io package. 15 code examples of the MD5Hash class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
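Before the examples, a minimal standalone sketch of the core MD5Hash API may help orient you: computing a digest from raw bytes, rendering it as hex, and parsing it back. It assumes only hadoop-common on the classpath; the class name and sample data are illustrative, not taken from any example below.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.MD5Hash;

public class MD5HashBasics {
  public static void main(String[] args) {
    // Compute an MD5 digest over a byte array.
    byte[] data = "hello hadoop".getBytes(StandardCharsets.UTF_8);
    MD5Hash hash = MD5Hash.digest(data);

    // toString() renders the digest as a 32-character hex string.
    String hex = hash.toString();
    System.out.println("digest = " + hex);

    // The hex form round-trips through the String constructor.
    MD5Hash parsed = new MD5Hash(hex);
    System.out.println("round-trip ok = " + hash.equals(parsed));

    // getDigest() exposes the raw 16-byte digest.
    System.out.println("raw length = " + hash.getDigest().length);
  }
}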
Example 1: downloadImageToStorage
import org.apache.hadoop.io.MD5Hash; // import the required package/class
public static MD5Hash downloadImageToStorage(URL fsName, long imageTxId,
    Storage dstStorage, boolean needDigest) throws IOException {
  String fileid = ImageServlet.getParamStringForImage(null,
      imageTxId, dstStorage);
  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);

  List<File> dstFiles = dstStorage.getFiles(
      NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }

  MD5Hash hash = getFileClient(fsName, fileid, dstFiles, dstStorage, needDigest);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() +
      " size " + dstFiles.get(0).length() + " bytes.");
  return hash;
}
Example 2: handleUploadImageRequest
import org.apache.hadoop.io.MD5Hash; // import the required package/class
static MD5Hash handleUploadImageRequest(HttpServletRequest request,
    long imageTxId, Storage dstStorage, InputStream stream,
    long advertisedSize, DataTransferThrottler throttler) throws IOException {
  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);

  List<File> dstFiles = dstStorage.getFiles(NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }

  MD5Hash advertisedDigest = parseMD5Header(request);
  MD5Hash hash = receiveFile(fileName, dstFiles, dstStorage, true,
      advertisedSize, advertisedDigest, fileName, stream, throttler);
  LOG.info("Downloaded file " + dstFiles.get(0).getName()
      + " size " + dstFiles.get(0).length() + " bytes.");
  return hash;
}
Example 3: loadFSImage
import org.apache.hadoop.io.MD5Hash; // import the required package/class
/**
 * Load in the filesystem image from file. It's a big list of
 * filenames and blocks.
 */
private void loadFSImage(File curFile, MD5Hash expectedMd5,
    FSNamesystem target, MetaRecoveryContext recovery,
    boolean requireSameLayoutVersion) throws IOException {
  // BlockPoolId is required when the FsImageLoader loads the rolling upgrade
  // information. Make sure the ID is properly set.
  target.setBlockPoolId(this.getBlockPoolID());

  FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, target);
  loader.load(curFile, requireSameLayoutVersion);

  // Check that the image digest we loaded matches up with what
  // we expected
  MD5Hash readImageMd5 = loader.getLoadedImageMd5();
  if (expectedMd5 != null &&
      !expectedMd5.equals(readImageMd5)) {
    throw new IOException("Image file " + curFile +
        " is corrupt with MD5 checksum of " + readImageMd5 +
        " but expecting " + expectedMd5);
  }

  long txId = loader.getLoadedImageTxId();
  LOG.info("Loaded image for txid " + txId + " from " + curFile);
  lastAppliedTxId = txId;
  storage.setMostRecentCheckpointInfo(txId, curFile.lastModified());
}
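The verify-or-fail pattern above (recompute the digest, compare it with the expected one, and treat any mismatch as corruption) also works outside the NameNode. Below is a minimal sketch of the same idea against an arbitrary file; verifyDigest is a hypothetical helper, not HDFS API, and it assumes only java.io plus MD5Hash.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.io.MD5Hash;

public class DigestCheck {
  // Recompute a file's MD5 and fail loudly if it differs from the expected digest.
  static void verifyDigest(File file, MD5Hash expected) throws IOException {
    MD5Hash actual;
    try (InputStream in = new FileInputStream(file)) {
      actual = MD5Hash.digest(in); // reads the stream to EOF
    }
    if (expected != null && !expected.equals(actual)) {
      throw new IOException("File " + file + " is corrupt: computed MD5 " +
          actual + " but expected " + expected);
    }
  }
}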
Example 4: readStoredMd5ForFile
import org.apache.hadoop.io.MD5Hash; // import the required package/class
/**
 * Read the md5 checksum stored alongside the given data file.
 * @param dataFile the file containing data
 * @return the checksum stored in dataFile.md5
 */
public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
  final File md5File = getDigestFileForFile(dataFile);
  if (!md5File.exists()) {
    return null;
  }

  final Matcher matcher = readStoredMd5(md5File);
  String storedHash = matcher.group(1);
  File referencedFile = new File(matcher.group(2));

  // Sanity check: Make sure that the file referenced in the .md5 file at
  // least has the same name as the file we expect
  if (!referencedFile.getName().equals(dataFile.getName())) {
    throw new IOException(
        "MD5 file at " + md5File + " references file named " +
        referencedFile.getName() + " but we expected it to reference " +
        dataFile);
  }
  return new MD5Hash(storedHash);
}
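As a usage note, the sidecar file read above follows the md5sum line format: a 32-character hex digest, a separator, then the name of the referenced file. A hedged sketch of calling this helper, assuming MD5FileUtils from org.apache.hadoop.hdfs.util and a data file path passed on the command line:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;

public class SidecarExample {
  public static void main(String[] args) throws IOException {
    File dataFile = new File(args[0]); // any file with a sibling .md5
    // Returns null when no .md5 sidecar exists next to the data file.
    MD5Hash stored = MD5FileUtils.readStoredMd5ForFile(dataFile);
    System.out.println(stored == null
        ? "no stored digest for " + dataFile
        : "stored digest: " + stored);
  }
}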
Example 5: corruptFSImageMD5
import org.apache.hadoop.io.MD5Hash; // import the required package/class
/**
 * Corrupts the MD5 sum of the fsimage.
 *
 * @param corruptAll
 *          whether to corrupt one or all of the MD5 sums in the configured
 *          namedirs
 * @throws IOException
 */
private void corruptFSImageMD5(boolean corruptAll) throws IOException {
  List<URI> nameDirs = (List<URI>) FSNamesystem.getNamespaceDirs(config);
  // Corrupt the md5 files in all the namedirs
  for (URI uri : nameDirs) {
    // Directory layout looks like:
    // test/data/dfs/nameN/current/{fsimage,edits,...}
    File nameDir = new File(uri.getPath());
    File dfsDir = nameDir.getParentFile();
    assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir

    // Set the md5 file to all zeros
    File imageFile = new File(nameDir,
        Storage.STORAGE_DIR_CURRENT + "/"
        + NNStorage.getImageFileName(0));
    MD5FileUtils.saveMD5File(imageFile, new MD5Hash(new byte[16]));

    // Only need to corrupt one if !corruptAll
    if (!corruptAll) {
      break;
    }
  }
}
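Example 5 builds a digest directly from a 16-byte array. Since MD5Hash implements WritableComparable, such a value also serializes through the Writable contract; here is a minimal round-trip sketch, assuming nothing beyond hadoop-common and the JDK:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.MD5Hash;

public class WritableRoundTrip {
  public static void main(String[] args) throws IOException {
    MD5Hash original = new MD5Hash(new byte[16]); // all-zero digest, as in Example 5

    // Serialize with write(DataOutput).
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // Deserialize with the static read(DataInput) factory.
    MD5Hash copy = MD5Hash.read(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println("round-trip ok = " + original.equals(copy));
  }
}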
Example 6: downloadImageToStorage
import org.apache.hadoop.io.MD5Hash; // import the required package/class
public static MD5Hash downloadImageToStorage(URL fsName, long imageTxId,
    Storage dstStorage, boolean needDigest, boolean isBootstrapStandby)
    throws IOException {
  String fileid = ImageServlet.getParamStringForImage(null,
      imageTxId, dstStorage, isBootstrapStandby);
  String fileName = NNStorage.getCheckpointImageFileName(imageTxId);

  List<File> dstFiles = dstStorage.getFiles(
      NameNodeDirType.IMAGE, fileName);
  if (dstFiles.isEmpty()) {
    throw new IOException("No targets in destination storage!");
  }

  MD5Hash hash = getFileClient(fsName, fileid, dstFiles, dstStorage, needDigest);
  LOG.info("Downloaded file " + dstFiles.get(0).getName() +
      " size " + dstFiles.get(0).length() + " bytes.");
  return hash;
}
Example 7: createPassword
import org.apache.hadoop.io.MD5Hash; // import the required package/class
@Override
protected synchronized byte[] createPassword(TokenIdent identifier) {
  int sequenceNum;
  long now = Time.now();
  sequenceNum = incrementDelegationTokenSeqNum();
  identifier.setIssueDate(now);
  identifier.setMaxDate(now + tokenMaxLifetime);
  identifier.setMasterKeyId(currentKey.getKeyId());
  identifier.setSequenceNumber(sequenceNum);
  LOG.info("Creating password for identifier: ["
      + MD5Hash.digest(identifier.getBytes()) + ", "
      + currentKey.getKeyId() + "]");

  byte[] password = createPassword(identifier.getBytes(), currentKey.getKey());
  DelegationTokenInformation tokenInfo = new DelegationTokenInformation(now
      + tokenRenewInterval, password, getTrackingIdIfEnabled(identifier));
  try {
    storeToken(identifier, tokenInfo);
  } catch (IOException ioe) {
    LOG.error("Could not store token !!", ioe);
  }
  return password;
}
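Note how the log line above prints MD5Hash.digest(identifier.getBytes()) rather than the identifier itself: the digest acts as a compact, stable fingerprint, so the sensitive token bytes never land in the log verbatim. A standalone sketch of that pattern (the identifier string here is made up for illustration):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.MD5Hash;

public class LogFingerprint {
  public static void main(String[] args) {
    // Stand-in for a real serialized token identifier.
    byte[] identifier = "owner=alice;renewer=hdfs;seq=42"
        .getBytes(StandardCharsets.UTF_8);
    // Log the digest, not the raw identifier bytes.
    System.out.println("Creating password for identifier: ["
        + MD5Hash.digest(identifier) + "]");
  }
}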
Example 8: testSingleFailure
import org.apache.hadoop.io.MD5Hash; // import the required package/class
/**
 * Test upload with one failed channel.
 */
private void testSingleFailure(InjectionEventI failOn) throws Exception {
  LOG.info("----- testSingleFailure for event : " + failOn);
  Random r = new Random();
  int numNodes = cluster.getNumNodes();

  TestImageUploadStreamInjectionHandler h = new TestImageUploadStreamInjectionHandler(
      numNodes);
  InjectionHandler.set(h);

  for (int i = 0; i < iterations; i++) {
    LOG.info("-- iteration: " + i);
    int failJournal = r.nextInt(numNodes);
    h.setFailure(failJournal, failOn);

    // the write should succeed
    MD5Hash digest = writeDataAndAssertContents(h, i);

    // clear hashes for next iteration
    h.clearHandler();

    // finalize the image
    assertManifest(i, digest, true);
  }
}
Example 9: valueOf
import org.apache.hadoop.io.MD5Hash; // import the required package/class
/** Return the object represented in the attributes. */
public static MD5MD5CRC32FileChecksum valueOf(Attributes attrs
    ) throws SAXException {
  final String bytesPerCRC = attrs.getValue("bytesPerCRC");
  final String crcPerBlock = attrs.getValue("crcPerBlock");
  final String md5 = attrs.getValue("md5");
  if (bytesPerCRC == null || crcPerBlock == null || md5 == null) {
    return null;
  }

  try {
    return new MD5MD5CRC32FileChecksum(Integer.valueOf(bytesPerCRC),
        Integer.valueOf(crcPerBlock), new MD5Hash(md5));
  } catch (Exception e) {
    throw new SAXException("Invalid attributes: bytesPerCRC=" + bytesPerCRC
        + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5, e);
  }
}
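Example 9 leans on new MD5Hash(md5) to reject malformed input, catching the resulting exception and rewrapping it. A small sketch of that defensive parse in isolation; tryParse is a hypothetical helper, and the assumption (which the example above also makes) is that the constructor throws on non-hex or wrong-length input:

import org.apache.hadoop.io.MD5Hash;

public class ParseHex {
  // Parse a 32-character hex digest, returning null on malformed input.
  static MD5Hash tryParse(String hex) {
    try {
      return new MD5Hash(hex);
    } catch (Exception e) {
      return null;
    }
  }

  public static void main(String[] args) {
    System.out.println(tryParse("d41d8cd98f00b204e9800998ecf8427e")); // MD5 of the empty string
    System.out.println(tryParse("not-a-digest"));                     // null
  }
}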
Example 10: saveDigestAndRenameCheckpointImage
import org.apache.hadoop.io.MD5Hash; // import the required package/class
/**
 * This is called by the 2NN after having downloaded an image, and by
 * the NN after having received a new image from the 2NN. It
 * renames the image from fsimage_N.ckpt to fsimage_N and also
 * saves the related .md5 file into place.
 */
synchronized void saveDigestAndRenameCheckpointImage(
    long txid, MD5Hash digest) throws IOException {
  if (!digest.equals(storage.getCheckpointImageDigest(txid))) {
    throw new IOException(
        "Checkpoint image is corrupt: expecting an MD5 checksum of " +
        digest + " but is " + storage.getCheckpointImageDigest(txid));
  }

  imageSet.saveDigestAndRenameCheckpointImage(txid, digest);

  // So long as this is the newest image available,
  // advertise it as such to other checkpointers
  // from now on
  storage.setMostRecentCheckpointTxId(txid);
}
Example 11: loadFSImage
import org.apache.hadoop.io.MD5Hash; // import the required package/class
/**
 * Load in the filesystem image from file. It's a big list of
 * filenames and blocks.
 */
private void loadFSImage(File curFile, MD5Hash expectedMd5,
    FSNamesystem target, MetaRecoveryContext recovery) throws IOException {
  FSImageFormat.Loader loader = new FSImageFormat.Loader(
      conf, target);
  loader.load(curFile);
  target.setBlockPoolId(this.getBlockPoolID());

  // Check that the image digest we loaded matches up with what
  // we expected
  MD5Hash readImageMd5 = loader.getLoadedImageMd5();
  if (expectedMd5 != null &&
      !expectedMd5.equals(readImageMd5)) {
    throw new IOException("Image file " + curFile +
        " is corrupt with MD5 checksum of " + readImageMd5 +
        " but expecting " + expectedMd5);
  }

  long txId = loader.getLoadedImageTxId();
  LOG.info("Loaded image for txid " + txId + " from " + curFile);
  lastAppliedTxId = txId;
  storage.setMostRecentCheckpointInfo(txId, curFile.lastModified());
}
Example 12: getFileClient
import org.apache.hadoop.io.MD5Hash; // import the required package/class
/**
 * Client-side method to fetch a file from a server.
 * Copies the response from the URL to a list of local files.
 * @param dstStorage if an error occurs writing to one of the files,
 *                   this storage object will be notified.
 * @return a digest of the received file if getChecksum is true
 */
static MD5Hash getFileClient(URL infoServer,
    String queryString, List<File> localPaths,
    Storage dstStorage, boolean getChecksum) throws IOException {
  URL url = new URL(infoServer, ImageServlet.PATH_SPEC + "?" + queryString);
  LOG.info("Opening connection to " + url);
  return doGetUrl(url, localPaths, dstStorage, getChecksum);
}
Example 13: loadFSImageFile
import org.apache.hadoop.io.MD5Hash; // import the required package/class
void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
    FSImageFile imageFile, StartupOption startupOption) throws IOException {
  LOG.debug("Planning to load image :\n" + imageFile);
  StorageDirectory sdForProperties = imageFile.sd;
  storage.readProperties(sdForProperties, startupOption);

  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
    // For txid-based layout, we should have a .md5 file
    // next to the image file
    boolean isRollingRollback = RollingUpgradeStartupOption.ROLLBACK
        .matches(startupOption);
    loadFSImage(imageFile.getFile(), target, recovery, isRollingRollback);
  } else if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) {
    // In 0.22, we have the checksum stored in the VERSION file.
    String md5 = storage.getDeprecatedProperty(
        NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
    if (md5 == null) {
      throw new InconsistentFSStateException(sdForProperties.getRoot(),
          "Message digest property " +
          NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
          " not set for storage directory " + sdForProperties.getRoot());
    }
    loadFSImage(imageFile.getFile(), new MD5Hash(md5), target, recovery,
        false);
  } else {
    // We don't have any record of the md5sum
    loadFSImage(imageFile.getFile(), null, target, recovery, false);
  }
}
Example 14: saveDigestAndRenameCheckpointImage
import org.apache.hadoop.io.MD5Hash; // import the required package/class
/**
 * This is called by the 2NN after having downloaded an image, and by
 * the NN after having received a new image from the 2NN. It
 * renames the image from fsimage_N.ckpt to fsimage_N and also
 * saves the related .md5 file into place.
 */
public synchronized void saveDigestAndRenameCheckpointImage(NameNodeFile nnf,
    long txid, MD5Hash digest) throws IOException {
  // Write and rename MD5 file
  List<StorageDirectory> badSds = Lists.newArrayList();

  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
    File imageFile = NNStorage.getImageFile(sd, nnf, txid);
    try {
      MD5FileUtils.saveMD5File(imageFile, digest);
    } catch (IOException ioe) {
      badSds.add(sd);
    }
  }
  storage.reportErrorsOnDirectories(badSds);

  CheckpointFaultInjector.getInstance().afterMD5Rename();

  // Rename image from tmp file
  renameCheckpoint(txid, NameNodeFile.IMAGE_NEW, nnf, false);

  // So long as this is the newest image available,
  // advertise it as such to other checkpointers
  // from now on
  if (txid > storage.getMostRecentCheckpointTxId()) {
    storage.setMostRecentCheckpointInfo(txid, Time.now());
  }
}
Example 15: setVerificationHeadersForGet
import org.apache.hadoop.io.MD5Hash; // import the required package/class
/**
 * Set headers for content length, and, if available, md5.
 * @throws IOException
 */
public static void setVerificationHeadersForGet(HttpServletResponse response,
    File file) throws IOException {
  response.setHeader(TransferFsImage.CONTENT_LENGTH,
      String.valueOf(file.length()));
  MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
  if (hash != null) {
    response.setHeader(TransferFsImage.MD5_HEADER, hash.toString());
  }
}
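The header set above carries the digest as its hex toString() form, so a receiving side can reconstruct the MD5Hash and compare it with what it computed locally. A minimal sketch of that transport round-trip; the HTTP plumbing is elided and only the string conversion is shown:

import org.apache.hadoop.io.MD5Hash;

public class HeaderRoundTrip {
  public static void main(String[] args) {
    MD5Hash sent = MD5Hash.digest(new byte[] {1, 2, 3});

    // Sender: serialize the digest into a header value.
    String headerValue = sent.toString();

    // Receiver: reconstruct and compare against the locally computed digest.
    MD5Hash received = new MD5Hash(headerValue);
    System.out.println("digests match = " + sent.equals(received));
  }
}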