This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream.getLength. If you have been wondering what MetaDataInputStream.getLength does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also look at the enclosing class, org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream, for further usage examples.
The following shows six code examples of the MetaDataInputStream.getLength method, sorted by popularity by default.
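All of the examples below share the same basic shape: obtain a MetaDataInputStream for a block from the datanode's dataset, call getLength() to learn how many bytes of checksum metadata are on disk, and close the stream in a finally block. As a minimal sketch of that pattern (the dataset and block parameters are placeholders for whatever FSDatasetInterface implementation and Block you already have, and the import paths assume the same Hadoop version as the samples):

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream;
import org.apache.hadoop.io.IOUtils;

/** Returns the on-disk size of the checksum (.meta) file for a block. */
long metadataLength(FSDatasetInterface dataset, Block block) throws IOException {
  MetaDataInputStream metaIn = null;
  try {
    metaIn = dataset.getMetaDataInputStream(block);
    // getLength() reports the size of the metadata file backing the stream.
    return metaIn.getLength();
  } finally {
    IOUtils.closeStream(metaIn);
  }
}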
Example 1: readMetadata
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the package/class this method depends on
/**
 * Reads the metadata and sends the data in one 'DATA_CHUNK'.
 *
 * @param in
 */
void readMetadata(DataInputStream in, VersionAndOpcode versionAndOpcode)
    throws IOException {
  ReadMetadataHeader readMetadataHeader = new ReadMetadataHeader(
      versionAndOpcode);
  readMetadataHeader.readFields(in);
  final int namespaceId = readMetadataHeader.getNamespaceId();
  Block block = new Block(readMetadataHeader.getBlockId(), 0,
      readMetadataHeader.getGenStamp());
  MetaDataInputStream checksumIn = null;
  DataOutputStream out = null;
  updateCurrentThreadName("reading metadata for block " + block);
  try {
    checksumIn = datanode.data.getMetaDataInputStream(namespaceId, block);
    long fileSize = checksumIn.getLength();
    if (fileSize >= 1L << 31 || fileSize <= 0) {
      throw new IOException(
          "Unexpected size for checksumFile of block" + block);
    }
    byte[] buf = new byte[(int) fileSize];
    IOUtils.readFully(checksumIn, buf, 0, buf.length);
    out = new DataOutputStream(NetUtils.getOutputStream(s,
        datanode.socketWriteTimeout));
    out.writeByte(DataTransferProtocol.OP_STATUS_SUCCESS);
    out.writeInt(buf.length);
    out.write(buf);
    // last DATA_CHUNK
    out.writeInt(0);
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
  }
}
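The reply written above is just a status byte, a 4-byte length, the raw contents of the checksum file, and a zero-length chunk marking the end. A client-side sketch that mirrors those writes might look like the following; readMetadataReply is a hypothetical helper, and it assumes the same DataTransferProtocol constants used on the server side.

import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;

/** Parses the readMetadata reply: status, length, data, empty last chunk. */
byte[] readMetadataReply(DataInputStream reply) throws IOException {
  byte status = reply.readByte();   // matches out.writeByte(OP_STATUS_SUCCESS)
  if (status != DataTransferProtocol.OP_STATUS_SUCCESS) {
    throw new IOException("readMetadata failed, status=" + status);
  }
  int length = reply.readInt();     // matches out.writeInt(buf.length)
  byte[] metadata = new byte[length];
  reply.readFully(metadata);        // the checksum file contents
  reply.readInt();                  // trailing zero: the last DATA_CHUNK
  return metadata;
}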
Example 2: readMetadata
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the package/class this method depends on
/**
 * Reads the metadata and sends the data in one 'DATA_CHUNK'.
 * @param in
 */
void readMetadata(DataInputStream in) throws IOException {
  Block block = new Block(in.readLong(), 0, in.readLong());
  MetaDataInputStream checksumIn = null;
  DataOutputStream out = null;
  try {
    checksumIn = datanode.data.getMetaDataInputStream(block);
    long fileSize = checksumIn.getLength();
    if (fileSize >= 1L << 31 || fileSize <= 0) {
      throw new IOException("Unexpected size for checksumFile of block" +
          block);
    }
    byte[] buf = new byte[(int) fileSize];
    IOUtils.readFully(checksumIn, buf, 0, buf.length);
    out = new DataOutputStream(
        NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    out.writeByte(DataTransferProtocol.OP_STATUS_SUCCESS);
    out.writeInt(buf.length);
    out.write(buf);
    // last DATA_CHUNK
    out.writeInt(0);
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
  }
}
Example 3: getBlockChecksum
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the package/class this method depends on
/**
 * Get block checksum (MD5 of CRC32).
 * @param in
 */
void getBlockChecksum(DataInputStream in) throws IOException {
  final Block block = new Block(in.readLong(), 0, in.readLong());
  DataOutputStream out = null;
  final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
      metadataIn, BUFFER_SIZE));
  try {
    // read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = (metadataIn.getLength()
        - BlockMetadataHeader.getHeaderSize()) / checksum.getChecksumSize();
    // compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }
    // write reply
    out = new DataOutputStream(
        NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
    out.writeInt(bytesPerCRC);
    out.writeLong(crcPerBlock);
    md5.write(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }
}
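To make the getLength() arithmetic above concrete: the metadata file is a short header followed by one CRC value per bytesPerChecksum bytes of block data, so crcPerBlock is simply (file length - header size) / checksum size. A throwaway sketch of that computation, with every number chosen purely for illustration (512-byte chunks and 4-byte CRC32 values; the header size is passed in rather than hard-coded):

/** Mirrors the crcPerBlock computation; nothing here touches a real metadata file. */
static long crcPerBlock(long metaFileLength, int headerSize, int checksumSize) {
  return (metaFileLength - headerSize) / checksumSize;
}

// A 64 MiB block checksummed in 512-byte chunks needs 67108864 / 512 = 131072
// CRC32 entries, i.e. 131072 * 4 = 524288 bytes of checksums after the header,
// so crcPerBlock(headerSize + 524288, headerSize, 4) == 131072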
Example 4: getBlockChecksum
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the package/class this method depends on
/**
 * Get block checksum (MD5 of CRC32).
 * @param in
 */
void getBlockChecksum(DataInputStream in) throws IOException {
  final Block block = new Block(in.readLong(), 0, in.readLong());
  Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
  accessToken.readFields(in);
  DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s,
      datanode.socketWriteTimeout));
  if (datanode.isBlockTokenEnabled) {
    try {
      datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    } catch (InvalidToken e) {
      try {
        out.writeShort(DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
        out.flush();
        throw new IOException(
            "Access token verification failed, for client " + remoteAddress
            + " for OP_BLOCK_CHECKSUM for block " + block);
      } finally {
        IOUtils.closeStream(out);
      }
    }
  }
  final MetaDataInputStream metadataIn = datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
      metadataIn, BUFFER_SIZE));
  try {
    // read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = (metadataIn.getLength()
        - BlockMetadataHeader.getHeaderSize()) / checksum.getChecksumSize();
    // compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }
    // write reply
    out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
    out.writeInt(bytesPerCRC);
    out.writeLong(crcPerBlock);
    md5.write(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }
}
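The reply format in this variant is the mirror image of the writes above: a 2-byte status, the bytes-per-CRC, the CRC count derived from getLength(), and the MD5 digest. A hedged client-side sketch that consumes such a reply is shown below; readChecksumReply is an illustrative name, not part of the Hadoop API.

import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
import org.apache.hadoop.io.MD5Hash;

/** Reads the OP_BLOCK_CHECKSUM reply: status, bytesPerCRC, crcPerBlock, MD5. */
void readChecksumReply(DataInputStream reply) throws IOException {
  short status = reply.readShort();    // matches out.writeShort(OP_STATUS_SUCCESS)
  if (status != DataTransferProtocol.OP_STATUS_SUCCESS) {
    throw new IOException("OP_BLOCK_CHECKSUM failed, status=" + status);
  }
  int bytesPerCRC = reply.readInt();   // matches out.writeInt(bytesPerCRC)
  long crcPerBlock = reply.readLong(); // matches out.writeLong(crcPerBlock)
  MD5Hash md5 = new MD5Hash();
  md5.readFields(reply);               // matches md5.write(out)
  System.out.println("bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock
      + ", md5=" + md5);
}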
Example 5: opBlockChecksum
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the package/class this method depends on
/**
 * Get block checksum (MD5 of CRC32).
 */
@Override
protected void opBlockChecksum(DataInputStream in, Block block,
    Token<BlockTokenIdentifier> blockToken) throws IOException {
  DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s,
      datanode.socketWriteTimeout));
  if (datanode.isBlockTokenEnabled) {
    try {
      datanode.blockTokenSecretManager.checkAccess(blockToken, null, block,
          BlockTokenSecretManager.AccessMode.READ);
    } catch (InvalidToken e) {
      try {
        ERROR_ACCESS_TOKEN.write(out);
        out.flush();
        LOG.warn("Block token verification failed, for client "
            + remoteAddress + " for OP_BLOCK_CHECKSUM for block " + block
            + " : " + e.getLocalizedMessage());
        throw e;
      } finally {
        IOUtils.closeStream(out);
      }
    }
  }
  updateCurrentThreadName("Reading metadata for block " + block);
  final MetaDataInputStream metadataIn =
      datanode.data.getMetaDataInputStream(block);
  final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
      metadataIn, BUFFER_SIZE));
  updateCurrentThreadName("Getting checksum for block " + block);
  try {
    // read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = (metadataIn.getLength()
        - BlockMetadataHeader.getHeaderSize()) / checksum.getChecksumSize();
    // compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }
    // write reply
    SUCCESS.write(out);
    out.writeInt(bytesPerCRC);
    out.writeLong(crcPerBlock);
    md5.write(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }
  // update metrics
  updateDuration(datanode.myMetrics.blockChecksumOp);
}
Example 6: getBlockChecksum
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream; // import the package/class this method depends on
/**
 * Get block checksum (MD5 of CRC32).
 *
 * @param in
 */
void getBlockChecksum(DataInputStream in, VersionAndOpcode versionAndOpcode)
    throws IOException {
  // header
  BlockChecksumHeader blockChecksumHeader = new BlockChecksumHeader(
      versionAndOpcode);
  blockChecksumHeader.readFields(in);
  final int namespaceId = blockChecksumHeader.getNamespaceId();
  final Block block = new Block(blockChecksumHeader.getBlockId(), 0,
      blockChecksumHeader.getGenStamp());
  DataOutputStream out = null;
  final MetaDataInputStream metadataIn = datanode.data
      .getMetaDataInputStream(namespaceId, block);
  final DataInputStream checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, BUFFER_SIZE));
  updateCurrentThreadName("getting checksum for block " + block);
  try {
    // read metadata file
    final BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(checksumIn);
    final DataChecksum checksum = header.getChecksum();
    final int bytesPerCRC = checksum.getBytesPerChecksum();
    final long crcPerBlock = (metadataIn.getLength() - BlockMetadataHeader
        .getHeaderSize()) / checksum.getChecksumSize();
    // compute block checksum
    final MD5Hash md5 = MD5Hash.digest(checksumIn);
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
          + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
    }
    // write reply
    out = new DataOutputStream(NetUtils.getOutputStream(s,
        datanode.socketWriteTimeout));
    out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
    out.writeInt(bytesPerCRC);
    out.writeLong(crcPerBlock);
    md5.write(out);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
    IOUtils.closeStream(checksumIn);
    IOUtils.closeStream(metadataIn);
  }
}