

Java DataChecksum.getChecksumSize Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.util.DataChecksum.getChecksumSize. If you are wondering what DataChecksum.getChecksumSize does and how to use it in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.util.DataChecksum.


The following 9 code examples of DataChecksum.getChecksumSize are shown below, ordered by popularity.
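Before turning to the real-world examples, here is a minimal, self-contained sketch of what getChecksumSize returns for a freshly constructed DataChecksum. The checksum type and chunk size below are arbitrary choices for illustration; in current Hadoop versions a CRC32C checksum occupies 4 bytes per chunk, while the NULL type carries no checksum bytes at all.

import org.apache.hadoop.util.DataChecksum;

public class ChecksumSizeDemo {
  public static void main(String[] args) {
    // CRC32C over 512-byte chunks: each chunk is covered by a 4-byte checksum.
    DataChecksum crc = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
    System.out.println(crc.getChecksumSize());      // 4
    System.out.println(crc.getBytesPerChecksum());  // 512

    // The NULL type performs no checksumming, so the checksum size is 0.
    DataChecksum none = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
    System.out.println(none.getChecksumSize());     // 0
  }
}

The zero-size case is exactly what several of the examples below guard against with checksum.getChecksumSize() > 0.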

Example 1: RemoteBlockReader

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
private RemoteBlockReader(String file, String bpid, long blockId,
    DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  // Path is used only for printing block and file information in debug
  super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
                  ":" + bpid + ":of:"+ file)/*too non path-like?*/,
        1, verifyChecksum,
        checksum.getChecksumSize() > 0? checksum : null, 
        checksum.getBytesPerChecksum(),
        checksum.getChecksumSize());

  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = in;
  this.checksum = checksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);

  this.firstChunkOffset = firstChunkOffset;
  lastChunkOffset = firstChunkOffset;
  lastChunkLen = -1;

  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
  this.peerCache = peerCache;
}
 
Developer: naver, Project: hadoop, Lines of code: 37, Source file: RemoteBlockReader.java
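The padding arithmetic described in the comment above is easier to follow with concrete numbers. A small sketch with purely hypothetical values, assuming 512-byte chunks:

// Hypothetical read: the caller wants 3000 bytes starting at offset 1000.
// The datanode starts sending at a chunk boundary, so firstChunkOffset is 512.
long startOffset = 1000, bytesToRead = 3000, bytesPerChecksum = 512;
long firstChunkOffset = (startOffset / bytesPerChecksum) * bytesPerChecksum; // 512
long bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);   // 3000 + 488 = 3488

The extra 488 bytes are the chunk-alignment padding that the reader must consume (and verify) before reaching the bytes the caller actually asked for.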

Example 2: calcPartialBlockChecksum

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
private MD5Hash calcPartialBlockChecksum(ExtendedBlock block,
    long requestLength, DataChecksum checksum, DataInputStream checksumIn)
    throws IOException {
  final int bytesPerCRC = checksum.getBytesPerChecksum();
  final int csize = checksum.getChecksumSize();
  final byte[] buffer = new byte[4*1024];
  MessageDigest digester = MD5Hash.getDigester();

  long remaining = requestLength / bytesPerCRC * csize;
  for (int toDigest = 0; remaining > 0; remaining -= toDigest) {
    toDigest = checksumIn.read(buffer, 0,
        (int) Math.min(remaining, buffer.length));
    if (toDigest < 0) {
      break;
    }
    digester.update(buffer, 0, toDigest);
  }
  
  int partialLength = (int) (requestLength % bytesPerCRC);
  if (partialLength > 0) {
    byte[] buf = new byte[partialLength];
    final InputStream blockIn = datanode.data.getBlockInputStream(block,
        requestLength - partialLength);
    try {
      // Get the CRC of the partialLength.
      IOUtils.readFully(blockIn, buf, 0, partialLength);
    } finally {
      IOUtils.closeStream(blockIn);
    }
    checksum.update(buf, 0, partialLength);
    byte[] partialCrc = new byte[csize];
    checksum.writeValue(partialCrc, 0, true);
    digester.update(partialCrc);
  }
  return new MD5Hash(digester.digest());
}
 
Developer: naver, Project: hadoop, Lines of code: 37, Source file: DataXceiver.java
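To see what the loop above actually digests, take hypothetical values bytesPerCRC = 512, csize = 4 and requestLength = 1300:

// Two complete 512-byte chunks are covered by CRCs stored in the meta file.
long requestLength = 1300;
int bytesPerCRC = 512, csize = 4;
long remaining = requestLength / bytesPerCRC * csize;     // 2 * 4 = 8 checksum bytes read from checksumIn
int partialLength = (int) (requestLength % bytesPerCRC);  // 276 trailing bytes re-read from the block itself

So 8 bytes of stored CRCs go straight into the MD5 digester, while the 276-byte tail is re-checksummed from the block data and its freshly computed 4-byte CRC is digested last.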

Example 3: RemoteBlockReader

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
private RemoteBlockReader(String file, String bpid, long blockId,
    DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
  // Path is used only for printing block and file information in debug
  super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
          ":" + bpid + ":of:"+ file)/*too non path-like?*/,
      1, verifyChecksum,
      checksum.getChecksumSize() > 0? checksum : null,
      checksum.getBytesPerChecksum(),
      checksum.getChecksumSize());

  this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));

  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = in;
  this.checksum = checksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);

  this.firstChunkOffset = firstChunkOffset;
  lastChunkOffset = firstChunkOffset;
  lastChunkLen = -1;

  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
  this.peerCache = peerCache;
  this.tracer = tracer;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 38, Source file: RemoteBlockReader.java

Example 4: truncateBlock

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
static private void truncateBlock(File blockFile, File metaFile,
    long oldlen, long newlen) throws IOException {
  LOG.info("truncateBlock: blockFile=" + blockFile
      + ", metaFile=" + metaFile
      + ", oldlen=" + oldlen
      + ", newlen=" + newlen);

  if (newlen == oldlen) {
    return;
  }
  if (newlen > oldlen) {
    throw new IOException("Cannot truncate block to from oldlen (=" + oldlen
        + ") to newlen (=" + newlen + ")");
  }

  DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum(); 
  int checksumsize = dcs.getChecksumSize();
  int bpc = dcs.getBytesPerChecksum();
  long n = (newlen - 1)/bpc + 1;
  long newmetalen = BlockMetadataHeader.getHeaderSize() + n*checksumsize;
  long lastchunkoffset = (n - 1)*bpc;
  int lastchunksize = (int)(newlen - lastchunkoffset); 
  byte[] b = new byte[Math.max(lastchunksize, checksumsize)]; 

  RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
  try {
    //truncate blockFile 
    blockRAF.setLength(newlen);
 
    //read last chunk
    blockRAF.seek(lastchunkoffset);
    blockRAF.readFully(b, 0, lastchunksize);
  } finally {
    blockRAF.close();
  }

  //compute checksum
  dcs.update(b, 0, lastchunksize);
  dcs.writeValue(b, 0, false);

  //update metaFile 
  RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
  try {
    metaRAF.setLength(newmetalen);
    metaRAF.seek(newmetalen - checksumsize);
    metaRAF.write(b, 0, checksumsize);
  } finally {
    metaRAF.close();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 51, Source file: FsDatasetImpl.java
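The meta-file length computed in truncateBlock follows directly from the on-disk layout: a fixed header followed by one checksum per (full or partial) chunk. A worked example with hypothetical numbers, with the header size assumed to be 7 bytes for illustration:

// Truncate to newlen = 1300 bytes with 512-byte chunks and 4-byte CRCs.
long newlen = 1300;
int bpc = 512, checksumsize = 4;
long n = (newlen - 1) / bpc + 1;                       // 3 chunks: two full, one partial
long headerSize = 7;                                   // assumed value of BlockMetadataHeader.getHeaderSize()
long newmetalen = headerSize + n * checksumsize;       // 7 + 3*4 = 19 bytes
long lastchunkoffset = (n - 1) * bpc;                  // 1024
int lastchunksize = (int) (newlen - lastchunkoffset);  // 276 bytes whose CRC must be recomputed

The last chunk is now shorter than before, so its checksum is recomputed from the truncated data and written back at the very end of the shrunken meta file.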

Example 5: blockChecksum

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
  // client side now can specify a range of the block for checksum
  long requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  long visibleLength = datanode.data.getReplicaVisibleLength(block);
  boolean partialBlk = requestLength < visibleLength;

  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = datanode.data
      .getMetaDataInputStream(block);
  
  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int csize = checksum.getChecksumSize();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = csize <= 0 ? 0 : 
      (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;

    final MD5Hash md5 = partialBlk && crcPerBlock > 0 ? 
        calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
          : MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelper.convert(checksum.getChecksumType())))
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } catch (IOException ioe) {
    LOG.info("blockChecksum " + block + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
 
Developer: naver, Project: hadoop, Lines of code: 63, Source file: DataXceiver.java
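crcPerBlock above is simply the number of checksums stored in the meta file, recovered from its length. A small illustrative computation with hypothetical values (metadata header assumed to be 7 bytes, 4-byte CRCs):

// Hypothetical meta file for a 64 MB block with 512-byte chunks.
long headerSize = 7;
int csize = 4;
long metaFileLength = headerSize + (64L * 1024 * 1024 / 512) * csize;       // 131072 stored CRCs
long crcPerBlock = csize <= 0 ? 0 : (metaFileLength - headerSize) / csize;  // 131072

When the block uses the NULL checksum type, csize is 0 and crcPerBlock is reported as 0, which is why the csize <= 0 guard is needed before the division.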

Example 6: validateIntegrityAndSetLength

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/**
 * Find out the number of bytes in the block that match its crc.
 * 
 * This algorithm assumes that data corruption caused by unexpected 
 * datanode shutdown occurs only in the last crc chunk. So it checks
 * only the last chunk.
 * 
 * @param blockFile the block file
 * @param genStamp generation stamp of the block
 * @return the number of valid bytes
 */
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
  DataInputStream checksumIn = null;
  InputStream blockIn = null;
  try {
    final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
    long blockFileLen = blockFile.length();
    long metaFileLen = metaFile.length();
    int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
    if (!blockFile.exists() || blockFileLen == 0 ||
        !metaFile.exists() || metaFileLen < crcHeaderLen) {
      return 0;
    }
    checksumIn = new DataInputStream(
        new BufferedInputStream(new FileInputStream(metaFile),
            ioFileBufferSize));

    // read and handle the common header here. For now just a version
    final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
        checksumIn, metaFile);
    int bytesPerChecksum = checksum.getBytesPerChecksum();
    int checksumSize = checksum.getChecksumSize();
    long numChunks = Math.min(
        (blockFileLen + bytesPerChecksum - 1)/bytesPerChecksum, 
        (metaFileLen - crcHeaderLen)/checksumSize);
    if (numChunks == 0) {
      return 0;
    }
    IOUtils.skipFully(checksumIn, (numChunks-1)*checksumSize);
    blockIn = new FileInputStream(blockFile);
    long lastChunkStartPos = (numChunks-1)*bytesPerChecksum;
    IOUtils.skipFully(blockIn, lastChunkStartPos);
    int lastChunkSize = (int)Math.min(
        bytesPerChecksum, blockFileLen-lastChunkStartPos);
    byte[] buf = new byte[lastChunkSize+checksumSize];
    checksumIn.readFully(buf, lastChunkSize, checksumSize);
    IOUtils.readFully(blockIn, buf, 0, lastChunkSize);

    checksum.update(buf, 0, lastChunkSize);
    long validFileLength;
    if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
      validFileLength = lastChunkStartPos + lastChunkSize;
    } else { // last chunk is corrupt
      validFileLength = lastChunkStartPos;
    }

    // truncate if extra bytes are present without CRC
    if (blockFile.length() > validFileLength) {
      RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
      try {
        // truncate blockFile
        blockRAF.setLength(validFileLength);
      } finally {
        blockRAF.close();
      }
    }

    return validFileLength;
  } catch (IOException e) {
    FsDatasetImpl.LOG.warn(e);
    return 0;
  } finally {
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(blockIn);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 77, Source file: BlockPoolSlice.java
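The Math.min in the numChunks computation is what makes this recovery safe: only chunks fully accounted for by both the block file and the meta file are considered. A sketch with hypothetical lengths (the checksum header is assumed to be 5 bytes here):

// 1300 bytes of block data on disk, but the meta file only holds 2 complete CRCs after its header.
long blockFileLen = 1300, metaFileLen = 5 + 2 * 4;
int bytesPerChecksum = 512, checksumSize = 4, crcHeaderLen = 5;
long numChunks = Math.min(
    (blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum,  // 3 chunks of data on disk
    (metaFileLen - crcHeaderLen) / checksumSize);               // but only 2 CRCs -> numChunks = 2

Only the last chunk that has a stored CRC is verified; if it matches, everything up to the end of that chunk is kept, and any trailing bytes without a CRC are truncated away.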

Example 7: createStreams

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
@Override // ReplicaInPipelineInterface
public ReplicaOutputStreams createStreams(boolean isCreate, 
    DataChecksum requestedChecksum) throws IOException {
  File blockFile = getBlockFile();
  File metaFile = getMetaFile();
  if (DataNode.LOG.isDebugEnabled()) {
    DataNode.LOG.debug("writeTo blockfile is " + blockFile +
                       " of size " + blockFile.length());
    DataNode.LOG.debug("writeTo metafile is " + metaFile +
                       " of size " + metaFile.length());
  }
  long blockDiskSize = 0L;
  long crcDiskSize = 0L;
  
  // the checksum that should actually be used -- this
  // may differ from requestedChecksum for appends.
  final DataChecksum checksum;
  
  RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
  
  if (!isCreate) {
    // For append or recovery, we must enforce the existing checksum.
    // Also, verify that the file has correct lengths, etc.
    boolean checkedMeta = false;
    try {
      BlockMetadataHeader header = BlockMetadataHeader.readHeader(metaRAF);
      checksum = header.getChecksum();
      
      if (checksum.getBytesPerChecksum() !=
          requestedChecksum.getBytesPerChecksum()) {
        throw new IOException("Client requested checksum " +
            requestedChecksum + " when appending to an existing block " +
            "with different chunk size: " + checksum);
      }
      
      int bytesPerChunk = checksum.getBytesPerChecksum();
      int checksumSize = checksum.getChecksumSize();
      
      blockDiskSize = bytesOnDisk;
      crcDiskSize = BlockMetadataHeader.getHeaderSize() +
        (blockDiskSize+bytesPerChunk-1)/bytesPerChunk*checksumSize;
      if (blockDiskSize>0 && 
          (blockDiskSize>blockFile.length() || crcDiskSize>metaFile.length())) {
        throw new IOException("Corrupted block: " + this);
      }
      checkedMeta = true;
    } finally {
      if (!checkedMeta) {
        // clean up in case of exceptions.
        IOUtils.closeStream(metaRAF);
      }
    }
  } else {
    // for create, we can use the requested checksum
    checksum = requestedChecksum;
  }
  
  FileOutputStream blockOut = null;
  FileOutputStream crcOut = null;
  try {
    blockOut = new FileOutputStream(
        new RandomAccessFile( blockFile, "rw" ).getFD() );
    crcOut = new FileOutputStream(metaRAF.getFD() );
    if (!isCreate) {
      blockOut.getChannel().position(blockDiskSize);
      crcOut.getChannel().position(crcDiskSize);
    }
    return new ReplicaOutputStreams(blockOut, crcOut, checksum,
        getVolume().isTransientStorage());
  } catch (IOException e) {
    IOUtils.closeStream(blockOut);
    IOUtils.closeStream(metaRAF);
    throw e;
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 76, Source file: ReplicaInPipeline.java
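For an append (!isCreate), the two sizes computed above position the streams immediately after the data already on disk, in both the block file and the meta file. A small illustrative computation, with the metadata header size assumed to be 7 bytes:

// Hypothetical append to a replica that already has 1300 bytes, 512-byte chunks, 4-byte CRCs.
long bytesOnDisk = 1300;
int bytesPerChunk = 512, checksumSize = 4;
long blockDiskSize = bytesOnDisk;  // block stream resumes at byte 1300
long crcDiskSize = 7 + (blockDiskSize + bytesPerChunk - 1) / bytesPerChunk * checksumSize;  // 7 + 3*4 = 19

Because the chunk count is rounded up, crcDiskSize also covers the CRC of the partial last chunk, so the meta stream is positioned just past it.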

Example 8: blockChecksum

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
@Override
public void blockChecksum(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken,
      Op.BLOCK_CHECKSUM, BlockTokenIdentifier.AccessMode.READ);
  // client side now can specify a range of the block for checksum
  long requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  long visibleLength = datanode.data.getReplicaVisibleLength(block);
  boolean partialBlk = requestLength < visibleLength;

  updateCurrentThreadName("Reading metadata for block " + block);
  final LengthInputStream metadataIn = datanode.data
      .getMetaDataInputStream(block);
  
  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, ioFileBufferSize));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int csize = checksum.getChecksumSize();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = csize <= 0 ? 0 : 
      (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;

    final MD5Hash md5 = partialBlk && crcPerBlock > 0 ? 
        calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
          : MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()             
        .setBytesPerCrc(bytesPerCRC)
        .setCrcPerBlock(crcPerBlock)
        .setMd5(ByteString.copyFrom(md5.getDigest()))
        .setCrcType(PBHelperClient.convert(checksum.getChecksumType())))
      .build()
      .writeDelimitedTo(out);
    out.flush();
  } catch (IOException ioe) {
    LOG.info("blockChecksum " + block + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  datanode.metrics.addBlockChecksumOp(elapsed());
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 63, Source file: DataXceiver.java

Example 9: validateIntegrityAndSetLength

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/**
 * Find out the number of bytes in the block that match its crc.
 * 
 * This algorithm assumes that data corruption caused by unexpected 
 * datanode shutdown occurs only in the last crc chunk. So it checks
 * only the last chunk.
 * 
 * @param blockFile the block file
 * @param genStamp generation stamp of the block
 * @return the number of valid bytes
 */
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
  DataInputStream checksumIn = null;
  InputStream blockIn = null;
  try {
    final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
    long blockFileLen = blockFile.length();
    long metaFileLen = metaFile.length();
    int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
    if (!blockFile.exists() || blockFileLen == 0 ||
        !metaFile.exists() || metaFileLen < crcHeaderLen) {
      return 0;
    }
    checksumIn = new DataInputStream(
        new BufferedInputStream(new FileInputStream(metaFile),
            HdfsConstants.IO_FILE_BUFFER_SIZE));

    // read and handle the common header here. For now just a version
    final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
        checksumIn, metaFile);
    int bytesPerChecksum = checksum.getBytesPerChecksum();
    int checksumSize = checksum.getChecksumSize();
    long numChunks = Math.min(
        (blockFileLen + bytesPerChecksum - 1)/bytesPerChecksum, 
        (metaFileLen - crcHeaderLen)/checksumSize);
    if (numChunks == 0) {
      return 0;
    }
    IOUtils.skipFully(checksumIn, (numChunks-1)*checksumSize);
    blockIn = new FileInputStream(blockFile);
    long lastChunkStartPos = (numChunks-1)*bytesPerChecksum;
    IOUtils.skipFully(blockIn, lastChunkStartPos);
    int lastChunkSize = (int)Math.min(
        bytesPerChecksum, blockFileLen-lastChunkStartPos);
    byte[] buf = new byte[lastChunkSize+checksumSize];
    checksumIn.readFully(buf, lastChunkSize, checksumSize);
    IOUtils.readFully(blockIn, buf, 0, lastChunkSize);

    checksum.update(buf, 0, lastChunkSize);
    long validFileLength;
    if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
      validFileLength = lastChunkStartPos + lastChunkSize;
    } else { // last chunk is corrupt
      validFileLength = lastChunkStartPos;
    }

    // truncate if extra bytes are present without CRC
    if (blockFile.length() > validFileLength) {
      RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
      try {
        // truncate blockFile
        blockRAF.setLength(validFileLength);
      } finally {
        blockRAF.close();
      }
    }

    return validFileLength;
  } catch (IOException e) {
    FsDatasetImpl.LOG.warn(e);
    return 0;
  } finally {
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(blockIn);
  }
}
 
Developer: yncxcw, Project: big-c, Lines of code: 77, Source file: BlockPoolSlice.java


Note: the org.apache.hadoop.util.DataChecksum.getChecksumSize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project licenses. Do not reproduce without permission.