

Java DataChecksum.update Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.util.DataChecksum.update. If you are unsure what DataChecksum.update does, how to call it, or what real-world usage looks like, the selected examples below should help. You can also explore other usage examples of the org.apache.hadoop.util.DataChecksum class.


The following shows 4 code examples of the DataChecksum.update method, sorted by popularity by default.
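Before diving into the examples, here is a minimal, self-contained sketch of the typical DataChecksum.update call pattern. It is illustrative only: the CRC32C type, the 512-byte chunk size, and the sample data are assumptions, not values taken from the examples below.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.util.DataChecksum;

public class DataChecksumUpdateSketch {
  public static void main(String[] args) {
    // Hypothetical parameters: CRC32C checksums over 512-byte chunks.
    DataChecksum checksum =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);

    byte[] data = "example chunk data".getBytes(StandardCharsets.UTF_8);

    // Feed the bytes into the checksum, then serialize the CRC value.
    checksum.update(data, 0, data.length);
    byte[] crc = new byte[checksum.getChecksumSize()];
    checksum.writeValue(crc, 0, true); // reset=true clears the accumulator

    // Later, verify the same bytes against the stored CRC.
    checksum.update(data, 0, data.length);
    boolean matches = checksum.compare(crc, 0);
    System.out.println("data matches stored crc: " + matches);
  }
}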

Example 1: calcPartialBlockChecksum

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
private MD5Hash calcPartialBlockChecksum(ExtendedBlock block,
    long requestLength, DataChecksum checksum, DataInputStream checksumIn)
    throws IOException {
  final int bytesPerCRC = checksum.getBytesPerChecksum();
  final int csize = checksum.getChecksumSize();
  final byte[] buffer = new byte[4*1024];
  MessageDigest digester = MD5Hash.getDigester();

  // Digest the stored CRCs that cover the whole chunks within the requested range.
  long remaining = requestLength / bytesPerCRC * csize;
  for (int toDigest = 0; remaining > 0; remaining -= toDigest) {
    toDigest = checksumIn.read(buffer, 0,
        (int) Math.min(remaining, buffer.length));
    if (toDigest < 0) {
      break;
    }
    digester.update(buffer, 0, toDigest);
  }
  
  // If the requested length ends mid-chunk, recompute the CRC of that partial chunk.
  int partialLength = (int) (requestLength % bytesPerCRC);
  if (partialLength > 0) {
    byte[] buf = new byte[partialLength];
    final InputStream blockIn = datanode.data.getBlockInputStream(block,
        requestLength - partialLength);
    try {
      // Get the CRC of the partialLength.
      IOUtils.readFully(blockIn, buf, 0, partialLength);
    } finally {
      IOUtils.closeStream(blockIn);
    }
    checksum.update(buf, 0, partialLength);
    byte[] partialCrc = new byte[csize];
    checksum.writeValue(partialCrc, 0, true);
    digester.update(partialCrc);
  }
  return new MD5Hash(digester.digest());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 37, Source file: DataXceiver.java
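Design note on Example 1: the CRCs for whole chunks are digested directly from the meta file stream (checksumIn), but when requestLength ends mid-chunk the stored CRC for that chunk may cover more data than the requested prefix. The trailing partialLength bytes are therefore re-read from the block file, and their CRC is recomputed with checksum.update and serialized with checksum.writeValue before being fed to the MD5 digester.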

Example 2: truncateBlock

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
static private void truncateBlock(File blockFile, File metaFile,
    long oldlen, long newlen) throws IOException {
  LOG.info("truncateBlock: blockFile=" + blockFile
      + ", metaFile=" + metaFile
      + ", oldlen=" + oldlen
      + ", newlen=" + newlen);

  if (newlen == oldlen) {
    return;
  }
  if (newlen > oldlen) {
    throw new IOException("Cannot truncate block to from oldlen (=" + oldlen
        + ") to newlen (=" + newlen + ")");
  }

  DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum(); 
  int checksumsize = dcs.getChecksumSize();
  int bpc = dcs.getBytesPerChecksum();
  long n = (newlen - 1)/bpc + 1;
  long newmetalen = BlockMetadataHeader.getHeaderSize() + n*checksumsize;
  long lastchunkoffset = (n - 1)*bpc;
  int lastchunksize = (int)(newlen - lastchunkoffset); 
  byte[] b = new byte[Math.max(lastchunksize, checksumsize)]; 

  RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
  try {
    //truncate blockFile 
    blockRAF.setLength(newlen);
 
    //read last chunk
    blockRAF.seek(lastchunkoffset);
    blockRAF.readFully(b, 0, lastchunksize);
  } finally {
    blockRAF.close();
  }

  //compute checksum
  dcs.update(b, 0, lastchunksize);
  dcs.writeValue(b, 0, false);

  //update metaFile 
  RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
  try {
    metaRAF.setLength(newmetalen);
    metaRAF.seek(newmetalen - checksumsize);
    metaRAF.write(b, 0, checksumsize);
  } finally {
    metaRAF.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 51, Source file: FsDatasetImpl.java
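As a worked example of the truncation arithmetic above (assuming bytesPerChecksum = 512 and a 4-byte checksum, values chosen only for illustration): truncating to newlen = 1000 gives n = (1000 - 1)/512 + 1 = 2 chunks, a last chunk of 1000 - 512 = 488 bytes starting at offset 512, and a new meta file length of BlockMetadataHeader.getHeaderSize() + 2 * 4 bytes. The checksum of that 488-byte chunk is then recomputed with dcs.update and written over the final 4 bytes of the meta file.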

Example 3: validateIntegrityAndSetLength

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/**
 * Find out the number of bytes in the block that match its crc.
 * 
 * This algorithm assumes that data corruption caused by unexpected 
 * datanode shutdown occurs only in the last crc chunk. So it checks
 * only the last chunk.
 * 
 * @param blockFile the block file
 * @param genStamp generation stamp of the block
 * @return the number of valid bytes
 */
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
  DataInputStream checksumIn = null;
  InputStream blockIn = null;
  try {
    final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
    long blockFileLen = blockFile.length();
    long metaFileLen = metaFile.length();
    int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
    if (!blockFile.exists() || blockFileLen == 0 ||
        !metaFile.exists() || metaFileLen < crcHeaderLen) {
      return 0;
    }
    checksumIn = new DataInputStream(
        new BufferedInputStream(new FileInputStream(metaFile),
            ioFileBufferSize));

    // read and handle the common header here. For now just a version
    final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
        checksumIn, metaFile);
    int bytesPerChecksum = checksum.getBytesPerChecksum();
    int checksumSize = checksum.getChecksumSize();
    long numChunks = Math.min(
        (blockFileLen + bytesPerChecksum - 1)/bytesPerChecksum, 
        (metaFileLen - crcHeaderLen)/checksumSize);
    if (numChunks == 0) {
      return 0;
    }
    IOUtils.skipFully(checksumIn, (numChunks-1)*checksumSize);
    blockIn = new FileInputStream(blockFile);
    long lastChunkStartPos = (numChunks-1)*bytesPerChecksum;
    IOUtils.skipFully(blockIn, lastChunkStartPos);
    int lastChunkSize = (int)Math.min(
        bytesPerChecksum, blockFileLen-lastChunkStartPos);
    byte[] buf = new byte[lastChunkSize+checksumSize];
    checksumIn.readFully(buf, lastChunkSize, checksumSize);
    IOUtils.readFully(blockIn, buf, 0, lastChunkSize);

    checksum.update(buf, 0, lastChunkSize);
    long validFileLength;
    if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
      validFileLength = lastChunkStartPos + lastChunkSize;
    } else { // last chunk is corrupt
      validFileLength = lastChunkStartPos;
    }

    // truncate if extra bytes are present without CRC
    if (blockFile.length() > validFileLength) {
      RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
      try {
        // truncate blockFile
        blockRAF.setLength(validFileLength);
      } finally {
        blockRAF.close();
      }
    }

    return validFileLength;
  } catch (IOException e) {
    FsDatasetImpl.LOG.warn(e);
    return 0;
  } finally {
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(blockIn);
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 77, Source file: BlockPoolSlice.java
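The core decision in Examples 3 and 4, namely whether the last chunk's data still matches its stored CRC, can be isolated into a small standalone sketch. Here the chunk bytes and the "stored" CRC are simulated in memory instead of being read from real block and meta files, and the CRC32 type and sizes are illustrative assumptions.

import org.apache.hadoop.util.DataChecksum;

public class LastChunkVerifySketch {
  public static void main(String[] args) {
    int bytesPerChecksum = 512;   // hypothetical chunk size
    DataChecksum checksum =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, bytesPerChecksum);
    int checksumSize = checksum.getChecksumSize();

    int lastChunkSize = 300;      // hypothetical partial last chunk
    byte[] buf = new byte[lastChunkSize + checksumSize];
    // Pretend buf[0..lastChunkSize) was read from the block file (all zeros here).

    // Simulate the CRC that the meta file would hold by computing it up front.
    checksum.update(buf, 0, lastChunkSize);
    checksum.writeValue(buf, lastChunkSize, true); // reset=true clears the accumulator

    // Verification, as in validateIntegrityAndSetLength: recompute the chunk CRC
    // and compare it with the serialized value stored at offset lastChunkSize.
    checksum.update(buf, 0, lastChunkSize);
    boolean lastChunkMatches = checksum.compare(buf, lastChunkSize);
    System.out.println("last chunk matches crc: " + lastChunkMatches);
  }
}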

Example 4: validateIntegrityAndSetLength

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/**
 * Find out the number of bytes in the block that match its crc.
 * 
 * This algorithm assumes that data corruption caused by unexpected 
 * datanode shutdown occurs only in the last crc chunk. So it checks
 * only the last chunk.
 * 
 * @param blockFile the block file
 * @param genStamp generation stamp of the block
 * @return the number of valid bytes
 */
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
  DataInputStream checksumIn = null;
  InputStream blockIn = null;
  try {
    final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
    long blockFileLen = blockFile.length();
    long metaFileLen = metaFile.length();
    int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
    if (!blockFile.exists() || blockFileLen == 0 ||
        !metaFile.exists() || metaFileLen < crcHeaderLen) {
      return 0;
    }
    checksumIn = new DataInputStream(
        new BufferedInputStream(new FileInputStream(metaFile),
            HdfsConstants.IO_FILE_BUFFER_SIZE));

    // read and handle the common header here. For now just a version
    final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
        checksumIn, metaFile);
    int bytesPerChecksum = checksum.getBytesPerChecksum();
    int checksumSize = checksum.getChecksumSize();
    long numChunks = Math.min(
        (blockFileLen + bytesPerChecksum - 1)/bytesPerChecksum, 
        (metaFileLen - crcHeaderLen)/checksumSize);
    if (numChunks == 0) {
      return 0;
    }
    IOUtils.skipFully(checksumIn, (numChunks-1)*checksumSize);
    blockIn = new FileInputStream(blockFile);
    long lastChunkStartPos = (numChunks-1)*bytesPerChecksum;
    IOUtils.skipFully(blockIn, lastChunkStartPos);
    int lastChunkSize = (int)Math.min(
        bytesPerChecksum, blockFileLen-lastChunkStartPos);
    byte[] buf = new byte[lastChunkSize+checksumSize];
    checksumIn.readFully(buf, lastChunkSize, checksumSize);
    IOUtils.readFully(blockIn, buf, 0, lastChunkSize);

    checksum.update(buf, 0, lastChunkSize);
    long validFileLength;
    if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
      validFileLength = lastChunkStartPos + lastChunkSize;
    } else { // last chunk is corrupt
      validFileLength = lastChunkStartPos;
    }

    // truncate if extra bytes are present without CRC
    if (blockFile.length() > validFileLength) {
      RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
      try {
        // truncate blockFile
        blockRAF.setLength(validFileLength);
      } finally {
        blockRAF.close();
      }
    }

    return validFileLength;
  } catch (IOException e) {
    FsDatasetImpl.LOG.warn(e);
    return 0;
  } finally {
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(blockIn);
  }
}
 
Developer ID: yncxcw, Project: big-c, Lines of code: 77, Source file: BlockPoolSlice.java


注:本文中的org.apache.hadoop.util.DataChecksum.update方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。