Java MetaDataInputStream Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream. If you are wondering what MetaDataInputStream is for and how to use it, the curated examples below should help.


MetaDataInputStream is a nested class of org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface. Six code examples of the class are shown below, ordered by popularity.
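Before the examples, a quick orientation: a MetaDataInputStream wraps a block's on-disk metadata (checksum) file and is handed out by the DataNode's FSDatasetInterface. The following is a minimal sketch, assuming `dataset` (an FSDatasetInterface) and `block` are available from the surrounding DataNode context; it is an illustration, not one of the collected examples.

MetaDataInputStream metaIn = null;
try {
  // Open the checksum file that accompanies the block's data file.
  metaIn = dataset.getMetaDataInputStream(block);
  DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metaIn));
  // The file starts with a small header describing the checksum type.
  BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
  DataChecksum checksum = header.getChecksum();
  // Everything after the header is raw per-chunk checksum data.
  long checksumBytes = metaIn.getLength() - BlockMetadataHeader.getHeaderSize();
} finally {
  IOUtils.closeStream(metaIn);
}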

Example 1: readMetadata

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the required package/class
/**
 * Reads the metadata and sends the data in one 'DATA_CHUNK'.
 * 
 * @param in input stream from which the request header is read
 */
void readMetadata(DataInputStream in, VersionAndOpcode versionAndOpcode)
		throws IOException {
	ReadMetadataHeader readMetadataHeader = new ReadMetadataHeader(
			versionAndOpcode);
	readMetadataHeader.readFields(in);
	final int namespaceId = readMetadataHeader.getNamespaceId();
	Block block = new Block(readMetadataHeader.getBlockId(), 0,
			readMetadataHeader.getGenStamp());
	MetaDataInputStream checksumIn = null;
	DataOutputStream out = null;
	updateCurrentThreadName("reading metadata for block " + block);
	try {
		checksumIn = datanode.data.getMetaDataInputStream(namespaceId,
				block);

		long fileSize = checksumIn.getLength();

		if (fileSize >= 1L << 31 || fileSize <= 0) {
			throw new IOException(
					"Unexpected size for checksumFile of block " + block);
		}

		byte[] buf = new byte[(int) fileSize];
		IOUtils.readFully(checksumIn, buf, 0, buf.length);

		out = new DataOutputStream(NetUtils.getOutputStream(s,
				datanode.socketWriteTimeout));

		out.writeByte(DataTransferProtocol.OP_STATUS_SUCCESS);
		out.writeInt(buf.length);
		out.write(buf);

		// last DATA_CHUNK
		out.writeInt(0);
	} finally {
		IOUtils.closeStream(out);
		IOUtils.closeStream(checksumIn);
	}
}
 
Developer: iVCE, Project: RDFS, Lines: 45, Source: DataXceiver.java
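For context, the reply written by this method frames the metadata as a status byte, an int payload length, the payload bytes, and a terminating zero int (the 'last DATA_CHUNK'). A hypothetical client-side read of that framing might look like the sketch below; `socketIn` is an assumed InputStream connected to the DataNode, and none of this is part of the collected source.

// Parse the reply framing produced by readMetadata() above.
DataInputStream reply = new DataInputStream(socketIn);
byte status = reply.readByte();
if (status != DataTransferProtocol.OP_STATUS_SUCCESS) {
  throw new IOException("readMetadata failed, status=" + status);
}
int len = reply.readInt();               // payload length written by the server
byte[] meta = new byte[len];
IOUtils.readFully(reply, meta, 0, len);  // raw contents of the checksum file
int terminator = reply.readInt();        // trailing zero marks the last DATA_CHUNK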

Example 2: readMetadata

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the required package/class
/**
 * Reads the metadata and sends the data in one 'DATA_CHUNK'.
 * @param in input stream from which the block id and generation stamp are read
 */
void readMetadata(DataInputStream in) throws IOException {
  Block block = new Block( in.readLong(), 0 , in.readLong());
  MetaDataInputStream checksumIn = null;
  DataOutputStream out = null;
  
  try {

    checksumIn = datanode.data.getMetaDataInputStream(block);
    
    long fileSize = checksumIn.getLength();

    if (fileSize >= 1L<<31 || fileSize <= 0) {
        throw new IOException("Unexpected size for checksumFile of block" +
                block);
    }

    byte [] buf = new byte[(int)fileSize];
    IOUtils.readFully(checksumIn, buf, 0, buf.length);
    
    out = new DataOutputStream(
              NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    
    out.writeByte(DataTransferProtocol.OP_STATUS_SUCCESS);
    out.writeInt(buf.length);
    out.write(buf);
    
    //last DATA_CHUNK
    out.writeInt(0);
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
  }
}
 
Developer: thisisvoa, Project: hadoop-0.20, Lines: 38, Source: DataXceiver.java

Example 3: getBlockChecksum

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the required package/class
/**
 * Get block checksum (MD5 of CRC32).
 * @param in input stream from which the block id and generation stamp are read
 */
void getBlockChecksum(DataInputStream in) throws IOException {
  final Block block = new Block(in.readLong(), 0 , in.readLong());

  DataOutputStream out = null;
  final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
      metadataIn, BUFFER_SIZE));

  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum(); 
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = (metadataIn.getLength()
        - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();
    
    //compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);

    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    out = new DataOutputStream(
        NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
    out.writeInt(bytesPerCRC);
    out.writeLong(crcPerBlock);
    md5.write(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }
}
 
Developer: thisisvoa, Project: hadoop-0.20, Lines: 43, Source: DataXceiver.java
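The crcPerBlock arithmetic deserves a note: the metadata file is a fixed-size header followed by one CRC value per bytesPerChecksum-sized chunk of block data, so the CRC count is (metadata length - header size) / checksum size. Below is a worked example under assumed defaults (512 bytes per checksum, 4-byte CRC32, and a 7-byte header; the header size is an assumption, not taken from the collected source).

// Worked example with assumed defaults; numbers are illustrative only.
long blockLen    = 64L * 1024 * 1024;        // 67,108,864 bytes of block data
int  bytesPerCRC = 512;                      // assumed io.bytes.per.checksum
int  crcSize     = 4;                        // CRC32 checksum size
long crcCount    = blockLen / bytesPerCRC;   // 131,072 CRC values
long metaLen     = 7 + crcCount * crcSize;   // 7 + 524,288 = 524,295 bytes
long crcPerBlock = (metaLen - 7) / crcSize;  // recovers 131,072, as in the code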

Example 4: getBlockChecksum

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the required package/class
/**
 * Get block checksum (MD5 of CRC32).
 * @param in input stream from which the block id, generation stamp and access token are read
 */
void getBlockChecksum(DataInputStream in) throws IOException {
  final Block block = new Block(in.readLong(), 0 , in.readLong());
  Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
  accessToken.readFields(in);
  DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s,
      datanode.socketWriteTimeout));
  if (datanode.isBlockTokenEnabled) {
    try {
      datanode.blockTokenSecretManager.checkAccess(accessToken, null, block, 
          BlockTokenSecretManager.AccessMode.READ);
    } catch (InvalidToken e) {
      try {
        out.writeShort(DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
        out.flush();
        throw new IOException(
            "Access token verification failed, for client " + remoteAddress
                + " for OP_BLOCK_CHECKSUM for block " + block);
      } finally {
        IOUtils.closeStream(out);
      }
    }
  }

  final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
      metadataIn, BUFFER_SIZE));

  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum(); 
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = (metadataIn.getLength()
        - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();
    
    //compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);

    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
    out.writeInt(bytesPerCRC);
    out.writeLong(crcPerBlock);
    md5.write(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 60, Source: DataXceiver.java
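On the wire, the success reply above consists of a short status code, an int bytesPerCRC, a long crcPerBlock, and the serialized MD5 digest. Since MD5Hash implements Writable it can deserialize itself; the sketch below is a hypothetical client-side parse, with `reply` assumed to wrap the DataNode socket, and is not part of the collected source.

short status = reply.readShort();
if (status != DataTransferProtocol.OP_STATUS_SUCCESS) {
  throw new IOException("block checksum request failed, status=" + status);
}
int bytesPerCRC  = reply.readInt();    // chunk size each CRC covers
long crcPerBlock = reply.readLong();   // number of CRCs in the block
MD5Hash md5 = new MD5Hash();
md5.readFields(reply);                 // MD5-of-CRC32 block checksum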

Example 5: opBlockChecksum

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the required package/class
/**
 * Get block checksum (MD5 of CRC32).
 */
@Override
protected void opBlockChecksum(DataInputStream in, Block block,
    Token<BlockTokenIdentifier> blockToken) throws IOException {
  DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s,
      datanode.socketWriteTimeout));
  if (datanode.isBlockTokenEnabled) {
    try {
      datanode.blockTokenSecretManager.checkAccess(blockToken, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    } catch (InvalidToken e) {
      try {
        ERROR_ACCESS_TOKEN.write(out);
        out.flush();
        LOG.warn("Block token verification failed, for client "
            + remoteAddress + " for OP_BLOCK_CHECKSUM for block " + block
            + " : " + e.getLocalizedMessage());
        throw e;
      } finally {
        IOUtils.closeStream(out);
      }

    }
  }

  updateCurrentThreadName("Reading metadata for block " + block);
  final MetaDataInputStream metadataIn = 
    datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
      metadataIn, BUFFER_SIZE));

  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    //read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum(); 
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = (metadataIn.getLength()
        - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();
    
    //compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);

    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }

    //write reply
    SUCCESS.write(out);
    out.writeInt(bytesPerCRC);
    out.writeLong(crcPerBlock);
    md5.write(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }

  //update metrics
  updateDuration(datanode.myMetrics.blockChecksumOp);
}
 
Developer: cumulusyebl, Project: cumulus, Lines: 66, Source: DataXceiver.java

Example 6: getBlockChecksum

import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the required package/class
/**
 * Get block checksum (MD5 of CRC32).
 * 
 * @param in input stream from which the request header is read
 */
void getBlockChecksum(DataInputStream in, VersionAndOpcode versionAndOpcode)
		throws IOException {
	// header
	BlockChecksumHeader blockChecksumHeader = new BlockChecksumHeader(
			versionAndOpcode);
	blockChecksumHeader.readFields(in);
	final int namespaceId = blockChecksumHeader.getNamespaceId();
	final Block block = new Block(blockChecksumHeader.getBlockId(), 0,
			blockChecksumHeader.getGenStamp());

	DataOutputStream out = null;
	final MetaDataInputStream metadataIn = datanode.data
			.getMetaDataInputStream(namespaceId, block);
	final DataInputStream checksumIn = new DataInputStream(
			new BufferedInputStream(metadataIn, BUFFER_SIZE));

	updateCurrentThreadName("getting checksum for block " + block);
	try {
		// read metadata file
		final BlockMetadataHeader header = BlockMetadataHeader
				.readHeader(checksumIn);
		final DataChecksum checksum = header.getChecksum();
		final int bytesPerCRC = checksum.getBytesPerChecksum();
		final long crcPerBlock = (metadataIn.getLength() - BlockMetadataHeader
				.getHeaderSize()) / checksum.getChecksumSize();

		// compute block checksum
		final MD5Hash md5 = MD5Hash.digest(checksumIn);

		if (LOG.isDebugEnabled()) {
			LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
					+ ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
		}

		// write reply
		out = new DataOutputStream(NetUtils.getOutputStream(s,
				datanode.socketWriteTimeout));
		out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
		out.writeInt(bytesPerCRC);
		out.writeLong(crcPerBlock);
		md5.write(out);
		out.flush();
	} finally {
		IOUtils.closeStream(out);
		IOUtils.closeStream(checksumIn);
		IOUtils.closeStream(metadataIn);
	}
}
 
Developer: iVCE, Project: RDFS, Lines: 54, Source: DataXceiver.java

