

Java BlockMetadataHeader.readHeader Method Code Examples

This article collects typical usages of the Java method org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader.readHeader. If you are wondering how BlockMetadataHeader.readHeader is used in Java, or what it looks like in real code, the selected examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader.


The following presents 6 code examples of the BlockMetadataHeader.readHeader method, sorted by popularity by default.
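Before the project examples, here is a minimal, self-contained sketch of the typical readHeader call pattern. It is not taken from any of the projects below; the metadata file path is a hypothetical placeholder, and it assumes a Hadoop version in which BlockMetadataHeader.readHeader(DataInputStream) and its accessors are publicly accessible, as they are in the examples that follow.

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.util.DataChecksum;

public class ReadMetaHeaderSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical path to a block metadata (.meta) file on a datanode volume.
    String metaPath = args.length > 0 ? args[0] : "/data/dn/current/blk_1073741825_1001.meta";
    try (DataInputStream in = new DataInputStream(
        new BufferedInputStream(new FileInputStream(metaPath)))) {
      // readHeader consumes the fixed-size header at the beginning of the stream,
      // leaving the stream positioned at the first checksum entry.
      BlockMetadataHeader header = BlockMetadataHeader.readHeader(in);
      short version = header.getVersion();
      DataChecksum checksum = header.getChecksum();
      System.out.println("version=" + version
          + ", bytesPerChecksum=" + checksum.getBytesPerChecksum()
          + ", checksumSize=" + checksum.getChecksumSize());
    }
  }
}

As in the examples below, callers typically compare header.getVersion() against BlockMetadataHeader.VERSION and then use the DataChecksum obtained from the header to locate and verify individual checksum chunks.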

Example 1: BlockReaderLocal

import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; // import the package/class the method depends on
public BlockReaderLocal(DFSClient.Conf conf, String filename,
    ExtendedBlock block, long startOffset, long length,
    FileInputStream dataIn, FileInputStream checksumIn,
    DatanodeID datanodeID, boolean verifyChecksum,
    FileInputStreamCache fisCache) throws IOException {
  this.dataIn = dataIn;
  this.checksumIn = checksumIn;
  this.startOffset = Math.max(startOffset, 0);
  this.filename = filename;
  this.datanodeID = datanodeID;
  this.block = block;
  this.fisCache = fisCache;

  // read and handle the common header here. For now just a version
  checksumIn.getChannel().position(0);
  BlockMetadataHeader header = BlockMetadataHeader
      .readHeader(new DataInputStream(
          new BufferedInputStream(checksumIn,
              BlockMetadataHeader.getHeaderSize())));
  short version = header.getVersion();
  if (version != BlockMetadataHeader.VERSION) {
    throw new IOException("Wrong version (" + version + ") of the " +
        "metadata file for " + filename + ".");
  }
  this.verifyChecksum = verifyChecksum && !conf.skipShortCircuitChecksums;
  long firstChunkOffset;
  if (this.verifyChecksum) {
    this.checksum = header.getChecksum();
    this.bytesPerChecksum = this.checksum.getBytesPerChecksum();
    this.checksumSize = this.checksum.getChecksumSize();
    firstChunkOffset = startOffset
        - (startOffset % checksum.getBytesPerChecksum());
    this.offsetFromChunkBoundary = (int) (startOffset - firstChunkOffset);

    int chunksPerChecksumRead = getSlowReadBufferNumChunks(
        conf.shortCircuitBufferSize, bytesPerChecksum);
    slowReadBuff = bufferPool.getBuffer(bytesPerChecksum * chunksPerChecksumRead);
    checksumBuff = bufferPool.getBuffer(checksumSize * chunksPerChecksumRead);
    // Initially the buffers have nothing to read.
    slowReadBuff.flip();
    checksumBuff.flip();
    long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
    IOUtils.skipFully(checksumIn, checkSumOffset);
  } else {
    firstChunkOffset = startOffset;
    this.checksum = null;
    this.bytesPerChecksum = 0;
    this.checksumSize = 0;
    this.offsetFromChunkBoundary = 0;
  }
  
  boolean success = false;
  try {
    // Reposition both input streams to the beginning of the chunk
    // containing startOffset
    this.dataIn.getChannel().position(firstChunkOffset);
    success = true;
  } finally {
    if (success) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Created BlockReaderLocal for file " + filename
            + " block " + block + " in datanode " + datanodeID);
      }
    } else {
      if (slowReadBuff != null) bufferPool.returnBuffer(slowReadBuff);
      if (checksumBuff != null) bufferPool.returnBuffer(checksumBuff);
    }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 70, Source: BlockReaderLocal.java

Example 2: validateIntegrity

import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; // import the package/class the method depends on
/**
 * Find out the number of bytes in the block that match its crc.
 * 
 * This algorithm assumes that data corruption caused by unexpected 
 * datanode shutdown occurs only in the last crc chunk. So it checks
 * only the last chunk.
 * 
 * @param blockFile the block file
 * @param genStamp generation stamp of the block
 * @return the number of valid bytes
 */
private long validateIntegrity(File blockFile, long genStamp) {
  DataInputStream checksumIn = null;
  InputStream blockIn = null;
  try {
    final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
    long blockFileLen = blockFile.length();
    long metaFileLen = metaFile.length();
    int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
    if (!blockFile.exists() || blockFileLen == 0 ||
        !metaFile.exists() || metaFileLen < crcHeaderLen) {
      return 0;
    }
    checksumIn = new DataInputStream(
        new BufferedInputStream(new FileInputStream(metaFile),
            HdfsConstants.IO_FILE_BUFFER_SIZE));

    // read and handle the common header here. For now just a version
    BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    short version = header.getVersion();
    if (version != BlockMetadataHeader.VERSION) {
      FsDatasetImpl.LOG.warn("Wrong version (" + version + ") for metadata file "
          + metaFile + " ignoring ...");
    }
    DataChecksum checksum = header.getChecksum();
    int bytesPerChecksum = checksum.getBytesPerChecksum();
    int checksumSize = checksum.getChecksumSize();
    long numChunks = Math.min(
        (blockFileLen + bytesPerChecksum - 1)/bytesPerChecksum, 
        (metaFileLen - crcHeaderLen)/checksumSize);
    if (numChunks == 0) {
      return 0;
    }
    IOUtils.skipFully(checksumIn, (numChunks-1)*checksumSize);
    blockIn = new FileInputStream(blockFile);
    long lastChunkStartPos = (numChunks-1)*bytesPerChecksum;
    IOUtils.skipFully(blockIn, lastChunkStartPos);
    int lastChunkSize = (int)Math.min(
        bytesPerChecksum, blockFileLen-lastChunkStartPos);
    byte[] buf = new byte[lastChunkSize+checksumSize];
    checksumIn.readFully(buf, lastChunkSize, checksumSize);
    IOUtils.readFully(blockIn, buf, 0, lastChunkSize);

    checksum.update(buf, 0, lastChunkSize);
    if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
      return lastChunkStartPos + lastChunkSize;
    } else { // last chunk is corrupt
      return lastChunkStartPos;
    }
  } catch (IOException e) {
    FsDatasetImpl.LOG.warn(e);
    return 0;
  } finally {
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(blockIn);
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 68, Source: BlockPoolSlice.java

Example 3: validateIntegrity

import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; // import the package/class the method depends on
/**
 * Find out the number of bytes in the block that match its crc.
 * <p/>
 * This algorithm assumes that data corruption caused by unexpected
 * datanode shutdown occurs only in the last crc chunk. So it checks
 * only the last chunk.
 *
 * @param blockFile
 *     the block file
 * @param genStamp
 *     generation stamp of the block
 * @return the number of valid bytes
 */
private long validateIntegrity(File blockFile, long genStamp) {
  DataInputStream checksumIn = null;
  InputStream blockIn = null;
  try {
    final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
    long blockFileLen = blockFile.length();
    long metaFileLen = metaFile.length();
    int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
    if (!blockFile.exists() || blockFileLen == 0 ||
        !metaFile.exists() || metaFileLen < crcHeaderLen) {
      return 0;
    }
    checksumIn = new DataInputStream(
        new BufferedInputStream(new FileInputStream(metaFile),
            HdfsConstants.IO_FILE_BUFFER_SIZE));

    // read and handle the common header here. For now just a version
    BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    short version = header.getVersion();
    if (version != BlockMetadataHeader.VERSION) {
      FsDatasetImpl.LOG.warn(
          "Wrong version (" + version + ") for metadata file " + metaFile +
              " ignoring ...");
    }
    DataChecksum checksum = header.getChecksum();
    int bytesPerChecksum = checksum.getBytesPerChecksum();
    int checksumSize = checksum.getChecksumSize();
    long numChunks =
        Math.min((blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum,
            (metaFileLen - crcHeaderLen) / checksumSize);
    if (numChunks == 0) {
      return 0;
    }
    IOUtils.skipFully(checksumIn, (numChunks - 1) * checksumSize);
    blockIn = new FileInputStream(blockFile);
    long lastChunkStartPos = (numChunks - 1) * bytesPerChecksum;
    IOUtils.skipFully(blockIn, lastChunkStartPos);
    int lastChunkSize =
        (int) Math.min(bytesPerChecksum, blockFileLen - lastChunkStartPos);
    byte[] buf = new byte[lastChunkSize + checksumSize];
    checksumIn.readFully(buf, lastChunkSize, checksumSize);
    IOUtils.readFully(blockIn, buf, 0, lastChunkSize);

    checksum.update(buf, 0, lastChunkSize);
    if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
      return lastChunkStartPos + lastChunkSize;
    } else { // last chunk is corrupt
      return lastChunkStartPos;
    }
  } catch (IOException e) {
    FsDatasetImpl.LOG.warn(e);
    return 0;
  } finally {
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(blockIn);
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 71, Source: BlockPoolSlice.java

Example 4: newBlockReader

import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; // import the package/class the method depends on
/**
 * The only way this object can be instantiated.
 */
static BlockReaderLocal newBlockReader(Configuration conf,
  String file, Block blk, Token<BlockTokenIdentifier> token, DatanodeInfo node, 
  int socketTimeout, long startOffset, long length) throws IOException {
  
  LocalDatanodeInfo localDatanodeInfo =  getLocalDatanodeInfo(node.getIpcPort());
  // check the cache first
  BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
  if (pathinfo == null) {
    pathinfo = getBlockPathInfo(blk, node, conf, socketTimeout, token);
  }

  // check to see if the file exists. It may so happen that the
  // HDFS file has been deleted and this block-lookup is occurring
  // on behalf of a new HDFS file. This time, the block file could
  // be residing in a different portion of the fs.data.dir directory.
  // In this case, we remove this entry from the cache. The next
  // call to this method will re-populate the cache.
  FileInputStream dataIn = null;
  FileInputStream checksumIn = null;
  BlockReaderLocal localBlockReader = null;
  boolean skipChecksum = shortCircuitChecksum(conf);
  try {
    // get a local file system
    File blkfile = new File(pathinfo.getBlockPath());
    dataIn = new FileInputStream(blkfile);

    if (LOG.isDebugEnabled()) {
      LOG.debug("New BlockReaderLocal for file " + blkfile + " of size "
          + blkfile.length() + " startOffset " + startOffset + " length "
          + length + " short circuit checksum " + skipChecksum);
    }

    if (!skipChecksum) {
      // get the metadata file
      File metafile = new File(pathinfo.getMetaPath());
      checksumIn = new FileInputStream(metafile);

      // read and handle the common header here. For now just a version
      BlockMetadataHeader header = BlockMetadataHeader
          .readHeader(new DataInputStream(checksumIn));
      short version = header.getVersion();
      if (version != FSDataset.METADATA_VERSION) {
        LOG.warn("Wrong version (" + version + ") for metadata file for "
            + blk + " ignoring ...");
      }
      DataChecksum checksum = header.getChecksum();
      localBlockReader = new BlockReaderLocal(conf, file, blk, token, startOffset, length,
          pathinfo, checksum, true, dataIn, checksumIn);
    } else {
      localBlockReader = new BlockReaderLocal(conf, file, blk, token, startOffset, length,
          pathinfo, dataIn);
    }
  } catch (IOException e) {
    // remove from cache
    localDatanodeInfo.removeBlockLocalPathInfo(blk);
    DFSClient.LOG.warn("BlockReaderLocal: Removing " + blk +
        " from cache because local file " + pathinfo.getBlockPath() +
        " could not be opened.");
    throw e;
  } finally {
    if (localBlockReader == null) {
      if (dataIn != null) {
        dataIn.close();
      }
      if (checksumIn != null) {
        checksumIn.close();
      }
    }  
  }
  return localBlockReader;
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 75, Source: BlockReaderLocal.java

Example 5: validateIntegrityAndSetLength

import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; // import the package/class the method depends on
/**
 * Find out the number of bytes in the block that match its crc.
 * 
 * This algorithm assumes that data corruption caused by unexpected 
 * datanode shutdown occurs only in the last crc chunk. So it checks
 * only the last chunk.
 * 
 * @param blockFile the block file
 * @param genStamp generation stamp of the block
 * @return the number of valid bytes
 */
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
  DataInputStream checksumIn = null;
  InputStream blockIn = null;
  try {
    final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
    long blockFileLen = blockFile.length();
    long metaFileLen = metaFile.length();
    int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
    if (!blockFile.exists() || blockFileLen == 0 ||
        !metaFile.exists() || metaFileLen < crcHeaderLen) {
      return 0;
    }
    checksumIn = new DataInputStream(
        new BufferedInputStream(new FileInputStream(metaFile),
            HdfsConstants.IO_FILE_BUFFER_SIZE));

    // read and handle the common header here. For now just a version
    BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    short version = header.getVersion();
    if (version != BlockMetadataHeader.VERSION) {
      FsDatasetImpl.LOG.warn("Wrong version (" + version + ") for metadata file "
          + metaFile + " ignoring ...");
    }
    DataChecksum checksum = header.getChecksum();
    int bytesPerChecksum = checksum.getBytesPerChecksum();
    int checksumSize = checksum.getChecksumSize();
    long numChunks = Math.min(
        (blockFileLen + bytesPerChecksum - 1)/bytesPerChecksum, 
        (metaFileLen - crcHeaderLen)/checksumSize);
    if (numChunks == 0) {
      return 0;
    }
    IOUtils.skipFully(checksumIn, (numChunks-1)*checksumSize);
    blockIn = new FileInputStream(blockFile);
    long lastChunkStartPos = (numChunks-1)*bytesPerChecksum;
    IOUtils.skipFully(blockIn, lastChunkStartPos);
    int lastChunkSize = (int)Math.min(
        bytesPerChecksum, blockFileLen-lastChunkStartPos);
    byte[] buf = new byte[lastChunkSize+checksumSize];
    checksumIn.readFully(buf, lastChunkSize, checksumSize);
    IOUtils.readFully(blockIn, buf, 0, lastChunkSize);

    checksum.update(buf, 0, lastChunkSize);
    long validFileLength;
    if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
      validFileLength = lastChunkStartPos + lastChunkSize;
    } else { // last chunk is corrupt
      validFileLength = lastChunkStartPos;
    }

    // truncate if extra bytes are present without CRC
    if (blockFile.length() > validFileLength) {
      RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
      try {
        // truncate blockFile
        blockRAF.setLength(validFileLength);
      } finally {
        blockRAF.close();
      }
    }

    return validFileLength;
  } catch (IOException e) {
    FsDatasetImpl.LOG.warn(e);
    return 0;
  } finally {
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(blockIn);
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 82, Source: BlockPoolSlice.java

Example 6: newBlockReader

import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; // import the package/class the method depends on
/**
 * The only way this object can be instantiated.
 */
public static BlockReaderLocal newBlockReader(Configuration conf,
  String file, int namespaceid, Block blk, DatanodeInfo node, 
  long startOffset, long length,
  DFSClientMetrics metrics, boolean verifyChecksum,
  boolean clearOsBuffer) throws IOException {
  // check in cache first
  BlockPathInfo pathinfo = cache.get(blk);

  if (pathinfo == null) {
    // cache the connection to the local data for eternity.
    if (datanode == null) {
      datanode = DFSClient.createClientDNProtocolProxy(node, conf, 0);
    }
    // make RPC to local datanode to find local pathnames of blocks
    if (datanode.isMethodSupported("getBlockPathInfo", int.class, Block.class)) {
      pathinfo = datanode.getProxy().getBlockPathInfo(namespaceid, blk);
    } else {
      pathinfo = datanode.getProxy().getBlockPathInfo(blk);
    }
    if (pathinfo != null) {
      cache.put(blk, pathinfo);
    }
  }
  
  // check to see if the file exists. It may so happen that the
  // HDFS file has been deleted and this block-lookup is occurring
  // on behalf of a new HDFS file. This time, the block file could
  // be residing in a different portion of the fs.data.dir directory.
  // In this case, we remove this entry from the cache. The next
  // call to this method will repopulate the cache.
  try {

    // get a local file system
    File blkfile = new File(pathinfo.getBlockPath());
    FileInputStream dataIn = new FileInputStream(blkfile);
    
    if (LOG.isDebugEnabled()) {
      LOG.debug("New BlockReaderLocal for file " +
                blkfile + " of size " + blkfile.length() +
                " startOffset " + startOffset +
                " length " + length);
    }

    if (verifyChecksum) {
    
      // get the metadata file
      File metafile = new File(pathinfo.getMetaPath());
      FileInputStream checksumIn = new FileInputStream(metafile);
  
      // read and handle the common header here. For now just a version
      BlockMetadataHeader header = BlockMetadataHeader.readHeader(
          new DataInputStream(checksumIn), new PureJavaCrc32());
      short version = header.getVersion();
    
      if (version != FSDataset.METADATA_VERSION) {
        LOG.warn("Wrong version (" + version + ") for metadata file for "
            + blk + " ignoring ...");
      }
      DataChecksum checksum = header.getChecksum();

      return new BlockReaderLocal(conf, file, blk, startOffset, length,
          pathinfo, metrics, checksum, verifyChecksum, dataIn, checksumIn,
          clearOsBuffer);
    }
    else {
      return new BlockReaderLocal(conf, file, blk, startOffset, length,
          pathinfo, metrics, dataIn, clearOsBuffer);
    }
    
  } catch (FileNotFoundException e) {
    cache.remove(blk);    // remove from cache
    DFSClient.LOG.warn("BlockReaderLoca: Removing " + blk +
                       " from cache because local file " +
                       pathinfo.getBlockPath() + 
                       " could not be opened.");
    throw e;
  }
}
 
Developer: iVCE, Project: RDFS, Lines: 81, Source: BlockReaderLocal.java


Note: The org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader.readHeader examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.