

Java MD5Hash.getDigester Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.MD5Hash.getDigester. If you are wondering what MD5Hash.getDigester does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.io.MD5Hash.


Nine code examples of the MD5Hash.getDigester method are shown below, ordered by popularity.
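Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: obtain a MessageDigest from MD5Hash.getDigester(), stream bytes through a java.security.DigestOutputStream, and wrap the finished digest in an MD5Hash. The class name and payload below are hypothetical placeholders, not taken from any of the examples.

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import org.apache.hadoop.io.MD5Hash;

public class Md5DigesterSketch {
  public static void main(String[] args) throws Exception {
    // MD5Hash.getDigester() returns a ready-to-use (reset) MD5 MessageDigest
    MessageDigest digester = MD5Hash.getDigester();

    // route every written byte through the digester
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DigestOutputStream dos = new DigestOutputStream(bos, digester);
    dos.write("hello, hadoop".getBytes(StandardCharsets.UTF_8)); // hypothetical payload
    dos.flush();

    // finalize the digest and wrap it in an MD5Hash
    MD5Hash hash = new MD5Hash(digester.digest());
    System.out.println("MD5: " + hash);
  }
}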

Example 1: genContent

import org.apache.hadoop.io.MD5Hash; // import the package/class the method depends on
/**
 * Generate random contents for the image and store it together with the md5
 * for later comparison.
 */
private ContentBody genContent(long txid) throws IOException {
  MessageDigest digester = MD5Hash.getDigester();

  // write through digester so we can roll the written image
  ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
  DigestOutputStream ds = new DigestOutputStream(bos, digester);

  // generate random bytes
  new Random().nextBytes(randomBytes);
  ds.write(randomBytes);
  ds.flush();

  // get written hash
  MD5Hash hash = new MD5Hash(digester.digest());

  // store contents and digest
  digests.put(txid, hash);
  content.put(txid, Arrays.copyOf(randomBytes, randomBytes.length));

  return new ByteArrayBody(bos.toByteArray(), "filename");
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 26, Source file: TestJournalNodeImageManifest.java

Example 2: startImageUpload

import org.apache.hadoop.io.MD5Hash; // import the package/class the method depends on
private static synchronized SessionDescriptor startImageUpload(
    UploadImageParam params, ServletContext context) throws IOException {

  // get and validate storage
  Journal journal = getStorage(context, params);
  JNStorage storage = journal.getImageStorage();

  // get tmp image file
  File outputFile = storage.getCheckpointImageFile(params.txId);

  // starting a new upload
  long sessionId = sessionIds.incrementAndGet();

  MessageDigest digester = MD5Hash.getDigester();
  // open the stream that will be used throughout the upload
  FileOutputStream fos = new FileOutputStream(outputFile);
  OutputStream os = new BufferedOutputStream(new DigestOutputStream(fos,
      digester));

  SessionDescriptor sd = new SessionDescriptor(journal, params.journalId,
      sessionId, os, params.txId, digester);
  sessions.put(sessionId, sd);
  InjectionHandler.processEventIO(InjectionEvent.UPLOADIMAGESERVLET_START,
      context);
  return sd;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 27, Source file: UploadImageServlet.java

Example 3: calcPartialBlockChecksum

import org.apache.hadoop.io.MD5Hash; // import the package/class the method depends on
private MD5Hash calcPartialBlockChecksum(ExtendedBlock block,
    long requestLength, DataChecksum checksum, DataInputStream checksumIn)
    throws IOException {
  final int bytesPerCRC = checksum.getBytesPerChecksum();
  final int csize = checksum.getChecksumSize();
  final byte[] buffer = new byte[4*1024];
  MessageDigest digester = MD5Hash.getDigester();

  long remaining = requestLength / bytesPerCRC * csize;
  for (int toDigest = 0; remaining > 0; remaining -= toDigest) {
    toDigest = checksumIn.read(buffer, 0,
        (int) Math.min(remaining, buffer.length));
    if (toDigest < 0) {
      break;
    }
    digester.update(buffer, 0, toDigest);
  }
  
  int partialLength = (int) (requestLength % bytesPerCRC);
  if (partialLength > 0) {
    byte[] buf = new byte[partialLength];
    final InputStream blockIn = datanode.data.getBlockInputStream(block,
        requestLength - partialLength);
    try {
      // Get the CRC of the partialLength.
      IOUtils.readFully(blockIn, buf, 0, partialLength);
    } finally {
      IOUtils.closeStream(blockIn);
    }
    checksum.update(buf, 0, partialLength);
    byte[] partialCrc = new byte[csize];
    checksum.writeValue(partialCrc, 0, true);
    digester.update(partialCrc);
  }
  return new MD5Hash(digester.digest());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 37, Source file: DataXceiver.java
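To make the byte accounting in this example concrete: the loop digests the stored per-chunk CRCs for every complete chunk, while the tail block recomputes the CRC of the final partial chunk directly from block data. A worked sketch of the arithmetic, with assumed values (these numbers are illustrative, not from the source):

public class PartialChecksumMath {
  public static void main(String[] args) {
    int bytesPerCRC = 512;      // assumed chunk size
    int csize = 4;              // assumed CRC-32 checksum width in bytes
    long requestLength = 1300;  // assumed partial-block request length

    long remaining = requestLength / bytesPerCRC * csize;
    int partialLength = (int) (requestLength % bytesPerCRC);
    System.out.println(remaining);      // 8: two complete chunks, 4 stored checksum bytes each
    System.out.println(partialLength);  // 276: trailing data bytes whose CRC is recomputed
  }
}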

Example 4: computeMd5ForFile

import org.apache.hadoop.io.MD5Hash; // import the package/class the method depends on
/**
 * Read dataFile and compute its MD5 checksum.
 */
public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
  InputStream in = new FileInputStream(dataFile);
  try {
    MessageDigest digester = MD5Hash.getDigester();
    DigestInputStream dis = new DigestInputStream(in, digester);
    IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128*1024);
    
    return new MD5Hash(digester.digest());
  } finally {
    IOUtils.closeStream(in);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 16, Source file: MD5FileUtils.java
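A possible caller, for illustration only (the verify helper and the expected digest are hypothetical; MD5FileUtils itself lives in org.apache.hadoop.hdfs.util): compute a file's MD5 and compare it against a known digest, for instance when validating a downloaded image file.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;

public class VerifyFileDigest {
  // hypothetical helper: throw if the file's MD5 does not match the expected digest
  static void verify(File dataFile, MD5Hash expected) throws IOException {
    MD5Hash actual = MD5FileUtils.computeMd5ForFile(dataFile);
    if (!actual.equals(expected)) {
      throw new IOException("MD5 mismatch for " + dataFile
          + ": " + actual + " != " + expected);
    }
  }
}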

Example 5: writeDataAndAssertContents

import org.apache.hadoop.io.MD5Hash; // import the package/class the method depends on
private MD5Hash writeDataAndAssertContents(
    TestImageUploadStreamInjectionHandler h, int iteration)
    throws IOException {

  // check write digest
  MessageDigest digester = MD5Hash.getDigester();

  // create stream
  HttpImageUploadStream ius = new HttpImageUploadStream(httpAddrs, JID, FAKE_NSINFO,
      startTxId + iteration, 1, bufferSize, maxNumChunks);

  DigestOutputStream ds = new DigestOutputStream(ius, digester);
  DataOutputStream dos = new DataOutputStream(ds);

  // write actual data
  byte[] written = writeData(dos, 10240);

  // flush
  dos.flush();

  // get written hash
  MD5Hash hash = new MD5Hash(digester.digest());

  // close the stream
  dos.close();
  assertContents(cluster, written, startTxId + iteration, hash, h);

  // roll image
  qjm.saveDigestAndRenameCheckpointImage(startTxId + iteration, hash);

  // final assert of the contents
  // get contents using input stream obtained from qjm
  InputStream is = qjm.getImageInputStream(startTxId + iteration)
      .getInputStream();
  byte[] contents = new byte[written.length];
  // a single read() may return fewer bytes than requested; loop until the buffer is full
  int off = 0;
  while (off < contents.length) {
    int n = is.read(contents, off, contents.length - off);
    if (n < 0) {
      throw new IOException("Premature EOF reading image contents");
    }
    off += n;
  }
  assertTrue(Arrays.equals(written, contents));

  return hash;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 41, Source file: TestImageUploadStream.java

Example 6: computeMd5ForFile

import org.apache.hadoop.io.MD5Hash; // import the package/class the method depends on
/**
 * Read dataFile and compute its MD5 checksum.
 */
public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
  InputStream in = new FileInputStream(dataFile);
  try {
    MessageDigest digester = MD5Hash.getDigester();
    DigestInputStream dis = new DigestInputStream(in, digester);
    IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128*1024, false);
    
    return new MD5Hash(digester.digest());
  } finally {
    IOUtils.closeStream(in);
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 16, Source file: MD5FileUtils.java

Example 7: computeMd5ForFile

import org.apache.hadoop.io.MD5Hash; // import the package/class the method depends on
/**
 * Read dataFile and compute its MD5 checksum.
 */
public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
  InputStream in = new FileInputStream(dataFile);
  try {
    MessageDigest digester = MD5Hash.getDigester();
    DigestInputStream dis = new DigestInputStream(in, digester);
    IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128 * 1024);
    
    return new MD5Hash(digester.digest());
  } finally {
    IOUtils.closeStream(in);
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 16, Source file: MD5FileUtils.java

Example 8: createRemoteImage

import org.apache.hadoop.io.MD5Hash; // import the package/class the method depends on
private RemoteImage createRemoteImage(long txid, boolean hasMd5)
    throws IOException {
  MessageDigest digester = MD5Hash.getDigester();
  return new RemoteImage(txid, hasMd5 ? new MD5Hash(
      digester.digest(getBytes(txid))) : null);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 7, Source file: TestQuorumJournalManagerManifest.java

Example 9: save

import org.apache.hadoop.io.MD5Hash; // import the package/class the method depends on
void save(File newFile, FSImageCompression compression) throws IOException {
  checkNotSaved();

  final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
  FSDirectory fsDir = sourceNamesystem.dir;
  String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
  Step step = new Step(StepType.INODES, sdPath);
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step,
    fsDir.rootDir.numItemsInTree());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  long startTime = now();
  //
  // Write out data
  //
  MessageDigest digester = MD5Hash.getDigester();
  FileOutputStream fout = new FileOutputStream(newFile);
  DigestOutputStream fos = new DigestOutputStream(fout, digester);
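  // the digester wraps the raw file stream, so the saved MD5 covers the exact
  // on-disk bytes: both the uncompressed header and the compressed payload
  // written after the header pass through this stream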
  DataOutputStream out = new DataOutputStream(fos);
  try {
    out.writeInt(HdfsConstants.LAYOUT_VERSION);
    // We use the non-locked version of getNamespaceInfo here since
    // the coordinating thread of saveNamespace already has read-locked
    // the namespace for us. If we attempt to take another readlock
    // from the actual saver thread, there's a potential of a
    // fairness-related deadlock. See the comments on HDFS-2223.
    out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
        .getNamespaceID());
    out.writeLong(fsDir.rootDir.numItemsInTree());
    out.writeLong(sourceNamesystem.getGenerationStampV1());
    out.writeLong(sourceNamesystem.getGenerationStampV2());
    out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
    out.writeLong(sourceNamesystem.getLastAllocatedBlockId());
    out.writeLong(context.getTxId());
    out.writeLong(sourceNamesystem.getLastInodeId());

    
    sourceNamesystem.getSnapshotManager().write(out);
    
    // write compression info and set up compressed stream
    out = compression.writeHeaderAndWrapStream(fos);
    LOG.info("Saving image file " + newFile +
             " using " + compression);

    // save the root
    saveINode2Image(fsDir.rootDir, out, false, referenceMap, counter);
    // save the rest of the nodes
    saveImage(fsDir.rootDir, out, true, counter);
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
    // Now that the step is finished, set counter equal to total to adjust
    // for possible under-counting due to reference inodes.
    prog.setCount(Phase.SAVING_CHECKPOINT, step,
      fsDir.rootDir.numItemsInTree());
    // save files under construction
    sourceNamesystem.saveFilesUnderConstruction(out);
    context.checkCancelled();
    sourceNamesystem.saveSecretManagerState(out, sdPath);
    context.checkCancelled();
    out.flush();
    context.checkCancelled();
    fout.getChannel().force(true);
  } finally {
    out.close();
  }

  saved = true;
  // set md5 of the saved image
  savedDigest = new MD5Hash(digester.digest());

  LOG.info("Image file " + newFile + " of size " + newFile.length() +
      " bytes saved in " + (now() - startTime)/1000 + " seconds.");
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 74, Source file: FSImageFormat.java


Note: The org.apache.hadoop.io.MD5Hash.getDigester method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please follow the corresponding project's License; do not reproduce without permission.