This page collects typical usage examples of the Java class org.apache.hadoop.hbase.util.ChecksumType. If you are unsure what ChecksumType is for or how to use it, the curated examples below should help.
The ChecksumType class lives in the org.apache.hadoop.hbase.util package. Fourteen code examples are shown below, sorted by popularity by default.
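
Before the examples, a quick orientation sketch. ChecksumType is an enum whose values map to a one-byte on-disk code; the getCode()/codeToType() round-trip below is the same one the block readers in the examples rely on (the class name ChecksumTypeDemo is illustrative):

import org.apache.hadoop.hbase.util.ChecksumType;

public class ChecksumTypeDemo {
  public static void main(String[] args) {
    // The default type used for new blocks (CRC32 or CRC32C depending on the HBase version).
    ChecksumType t = ChecksumType.getDefaultChecksumType();
    byte code = t.getCode();  // the one-byte code written into block headers
    // Readers map the stored code back to the enum, as the examples below do.
    System.out.println(t + " <-> " + ChecksumType.codeToType(code));
  }
}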
Example 1: generateChecksums

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

/**
 * Generates a checksum for all the data in indata. The checksum is
 * written to outdata.
 * @param indata input data stream
 * @param startOffset starting offset in the indata stream from which to
 *                    compute checksums
 * @param endOffset ending offset in the indata stream up to
 *                  which checksums need to be computed
 * @param outdata the output buffer where checksum values are written
 * @param outOffset the starting offset in the outdata where the
 *                  checksum values are written
 * @param checksumType type of checksum
 * @param bytesPerChecksum number of bytes per checksum value
 */
static void generateChecksums(byte[] indata, int startOffset, int endOffset,
    byte[] outdata, int outOffset, ChecksumType checksumType,
    int bytesPerChecksum) throws IOException {
  if (checksumType == ChecksumType.NULL) {
    return; // No checksum for this block.
  }
  DataChecksum checksum = DataChecksum.newDataChecksum(
      checksumType.getDataChecksumType(), bytesPerChecksum);
  checksum.calculateChunkedSums(
      ByteBuffer.wrap(indata, startOffset, endOffset - startOffset),
      ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset));
}
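
A minimal caller sketch for the helper above (values are illustrative; the 4-byte-per-chunk sizing assumes a CRC-style checksum such as ChecksumType.CRC32, which is what Hadoop's DataChecksum produces for it):

byte[] data = new byte[64 * 1024];        // block bytes to checksum
int bytesPerChecksum = 16 * 1024;         // one checksum per 16 KB chunk
int chunks = (data.length + bytesPerChecksum - 1) / bytesPerChecksum;
byte[] sums = new byte[chunks * 4];       // CRC32/CRC32C values are 4 bytes each
generateChecksums(data, 0, data.length, sums, 0, ChecksumType.CRC32, bytesPerChecksum);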
Example 2: HFileBlock

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

/**
 * Creates a block from an existing buffer starting with a header. Rewinds
 * and takes ownership of the buffer. By definition of rewind, ignores the
 * buffer position, but if you slice the buffer beforehand, it will rewind
 * to that point. The reason this has a minorNumber and not a majorNumber is
 * because majorNumbers indicate the format of an HFile whereas minorNumbers
 * indicate the format inside an HFileBlock.
 */
HFileBlock(ByteBuffer b, boolean usesHBaseChecksum) throws IOException {
  b.rewind();
  blockType = BlockType.read(b);
  onDiskSizeWithoutHeader = b.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
  uncompressedSizeWithoutHeader = b.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
  prevBlockOffset = b.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
  HFileContextBuilder contextBuilder = new HFileContextBuilder();
  contextBuilder.withHBaseCheckSum(usesHBaseChecksum);
  if (usesHBaseChecksum) {
    contextBuilder.withChecksumType(ChecksumType.codeToType(b.get(Header.CHECKSUM_TYPE_INDEX)));
    contextBuilder.withBytesPerCheckSum(b.getInt(Header.BYTES_PER_CHECKSUM_INDEX));
    this.onDiskDataSizeWithHeader = b.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
  } else {
    contextBuilder.withChecksumType(ChecksumType.NULL);
    contextBuilder.withBytesPerCheckSum(0);
    this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
        HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  }
  this.fileContext = contextBuilder.build();
  buf = b;
  buf.rewind();
}
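
For reference, the indexed reads above correspond to a fixed header layout. The constants below are a hypothetical mirror of the values in HFileBlock.Header, reconstructed from the field order and sizes read here and in Examples 10 and 12, so treat the exact offsets as an assumption:

// Hypothetical offsets; an 8-byte block-type magic occupies bytes 0..7.
static final int ON_DISK_SIZE_WITHOUT_HEADER_INDEX = 8;        // int
static final int UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX = 12;  // int
static final int PREV_BLOCK_OFFSET_INDEX = 16;                 // long
static final int CHECKSUM_TYPE_INDEX = 24;                     // byte (HBase-checksum variant only)
static final int BYTES_PER_CHECKSUM_INDEX = 25;                // int  (HBase-checksum variant only)
static final int ON_DISK_DATA_SIZE_WITH_HEADER_INDEX = 29;     // int  (HBase-checksum variant only)
// Total: 33 bytes with checksum fields, 24 without (HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).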
Example 3: getBlockForCaching

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

/**
 * Creates a new HFileBlock. Checksums have already been validated, so
 * the byte buffer passed into the constructor of this newly created
 * block does not have checksum data even though the header minor
 * version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a
 * 0 value in bytesPerChecksum.
 */
public HFileBlock getBlockForCaching(CacheConfig cacheConf) {
  HFileContext newContext = new HFileContextBuilder()
      .withBlockSize(fileContext.getBlocksize())
      .withBytesPerCheckSum(0)
      .withChecksumType(ChecksumType.NULL) // no checksums in cached data
      .withCompression(fileContext.getCompression())
      .withDataBlockEncoding(fileContext.getDataBlockEncoding())
      .withHBaseCheckSum(fileContext.isUseHBaseChecksum())
      .withCompressTags(fileContext.isCompressTags())
      .withIncludesMvcc(fileContext.isIncludesMvcc())
      .withIncludesTags(fileContext.isIncludesTags())
      .build();
  return new HFileBlock(blockType, getOnDiskSizeWithoutHeader(),
      getUncompressedSizeWithoutHeader(), prevOffset,
      cacheConf.shouldCacheCompressed(blockType.getCategory()) ?
          getOnDiskBufferWithHeader() :
          getUncompressedBufferWithHeader(),
      FILL_HEADER, startOffset,
      onDiskBytesWithHeader.length + onDiskChecksum.length, newContext);
}
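
Two design points worth calling out here: cacheConf.shouldCacheCompressed(...) decides whether the cached copy keeps the on-disk (possibly compressed) bytes or the uncompressed ones, and in either case the checksum data is dropped (ChecksumType.NULL, bytesPerChecksum 0), since a block only reaches the cache after its checksums have already been validated.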
Example 4: toStringHeader

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

/**
 * Convert the contents of the block header into a human readable string.
 * This is mostly helpful for debugging. This assumes that the block
 * has minor version > 0.
 */
static String toStringHeader(ByteBuffer buf) throws IOException {
  byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)];
  buf.get(magicBuf);
  BlockType bt = BlockType.parse(magicBuf, 0, BlockType.MAGIC_LENGTH);
  int compressedBlockSizeNoHeader = buf.getInt();
  int uncompressedBlockSizeNoHeader = buf.getInt();
  long prevBlockOffset = buf.getLong();
  byte cksumtype = buf.get();
  long bytesPerChecksum = buf.getInt();
  long onDiskDataSizeWithHeader = buf.getInt();
  return " Header dump: magic: " + Bytes.toString(magicBuf) +
      " blockType " + bt +
      " compressedBlockSizeNoHeader " + compressedBlockSizeNoHeader +
      " uncompressedBlockSizeNoHeader " + uncompressedBlockSizeNoHeader +
      " prevBlockOffset " + prevBlockOffset +
      " checksumType " + ChecksumType.codeToType(cksumtype) +
      " bytesPerChecksum " + bytesPerChecksum +
      " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
}
Example 5: testNewBlocksHaveDefaultChecksum

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

@Test
public void testNewBlocksHaveDefaultChecksum() throws IOException {
  Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum");
  FSDataOutputStream os = fs.create(path);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(BlockType.DATA);
  for (int i = 0; i < 1000; ++i) {
    dos.writeInt(i);
  }
  hbw.writeHeaderAndData(os);
  int totalSize = hbw.getOnDiskSizeWithHeader();
  os.close();
  // Use hbase checksums.
  assertEquals(true, hfs.useHBaseChecksum());
  FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
  meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
  HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(
      is, totalSize, (HFileSystem) fs, path, meta);
  HFileBlock b = hbr.readBlockData(0, -1, -1, false);
  assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode());
}
Example 6: testHeaderSizeInCacheWithoutChecksumInternals

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

private void testHeaderSizeInCacheWithoutChecksumInternals(boolean useTags) throws IOException {
  int headerSize = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  // Create some KVs and create the block with an old-style header.
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTags);
  ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int size = keyValues.limit();
  ByteBuffer buf = ByteBuffer.allocate(size + headerSize);
  buf.position(headerSize);
  keyValues.rewind();
  buf.put(keyValues);
  HFileContext hfileContext = new HFileContextBuilder().withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTags)
      .withBlockSize(0)
      .withChecksumType(ChecksumType.NULL)
      .build();
  HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf,
      HFileBlock.FILL_HEADER, 0, 0, hfileContext);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTags);
  assertEquals(headerSize, cacheBlock.getDummyHeaderForVersion().length);
}
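
A note on the buffer layout used here and in the next example: headerSize bytes are reserved at the front of the buffer and the serialized KeyValues are copied in after them. Passing HFileBlock.FILL_HEADER then asks the HFileBlock constructor to fill that reserved region with the header fields, so the caller never writes the header by hand.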
Example 7: getSampleHFileBlock

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

private HFileBlock getSampleHFileBlock(List<KeyValue> kvs, boolean useTag) {
  ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int size = keyValues.limit();
  ByteBuffer buf = ByteBuffer.allocate(size + HConstants.HFILEBLOCK_HEADER_SIZE);
  buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  keyValues.rewind();
  buf.put(keyValues);
  HFileContext meta = new HFileContextBuilder()
      .withIncludesMvcc(includesMemstoreTS)
      .withIncludesTags(useTag)
      .withHBaseCheckSum(true)
      .withCompression(Algorithm.NONE)
      .withBlockSize(0)
      .withChecksumType(ChecksumType.NULL)
      .build();
  HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, buf,
      HFileBlock.FILL_HEADER, 0, 0, meta);
  return b;
}
Example 8: HFileContext

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
    Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
    int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
    Encryption.Context cryptoContext, long fileCreateTime) {
  this.usesHBaseChecksum = useHBaseChecksum;
  this.includesMvcc = includesMvcc;
  this.includesTags = includesTags;
  this.compressAlgo = compressAlgo;
  this.compressTags = compressTags;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.blocksize = blockSize;
  if (encoding != null) {
    this.encoding = encoding;
  }
  this.cryptoContext = cryptoContext;
  this.fileCreateTime = fileCreateTime;
}
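
This constructor is usually reached through HFileContextBuilder, the fluent builder seen throughout the examples above. A minimal sketch using only builder methods that appear on this page (the numeric values are illustrative):

HFileContext ctx = new HFileContextBuilder()
    .withHBaseCheckSum(true)
    .withChecksumType(ChecksumType.getDefaultChecksumType())
    .withBytesPerCheckSum(16 * 1024)   // illustrative chunk size
    .withBlockSize(64 * 1024)          // illustrative block size
    .withCompression(Algorithm.NONE)
    .withIncludesMvcc(true)
    .withIncludesTags(false)
    .build();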
Example 9: HFileWriterV2

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  finishInit(conf);
}
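
Note the version gate at the end: when HConstants.HBASE_CHECKSUM_VERIFICATION is disabled in the configuration, the writer drops back to minor version 0, the pre-checksum block format handled by the constructor in Example 10 below.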
Example 10: HFileBlock

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

/**
 * Creates a block from an existing buffer starting with a header. Rewinds and takes ownership of
 * the buffer. By definition of rewind, ignores the buffer position, but if you slice the buffer
 * beforehand, it will rewind to that point. The reason this has a minorNumber and not a
 * majorNumber is because majorNumbers indicate the format of an HFile whereas minorNumbers
 * indicate the format inside an HFileBlock.
 */
HFileBlock(ByteBuffer b, int minorVersion) throws IOException {
  b.rewind();
  blockType = BlockType.read(b);
  onDiskSizeWithoutHeader = b.getInt();
  uncompressedSizeWithoutHeader = b.getInt();
  prevBlockOffset = b.getLong();
  this.minorVersion = minorVersion;
  if (minorVersion >= MINOR_VERSION_WITH_CHECKSUM) {
    this.checksumType = b.get();
    this.bytesPerChecksum = b.getInt();
    this.onDiskDataSizeWithHeader = b.getInt();
  } else {
    this.checksumType = ChecksumType.NULL.getCode();
    this.bytesPerChecksum = 0;
    this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader + HEADER_SIZE_NO_CHECKSUM;
  }
  buf = b;
  buf.rewind();
}
Example 11: toStringHeader

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

/**
 * Convert the contents of the block header into a human readable string. This is mostly helpful
 * for debugging. This assumes that the block has minor version > 0.
 */
static String toStringHeader(ByteBuffer buf) throws IOException {
  int offset = buf.arrayOffset();
  byte[] b = buf.array();
  long magic = Bytes.toLong(b, offset);
  BlockType bt = BlockType.read(buf);
  offset += Bytes.SIZEOF_LONG;
  int compressedBlockSizeNoHeader = Bytes.toInt(b, offset);
  offset += Bytes.SIZEOF_INT;
  int uncompressedBlockSizeNoHeader = Bytes.toInt(b, offset);
  offset += Bytes.SIZEOF_INT;
  long prevBlockOffset = Bytes.toLong(b, offset);
  offset += Bytes.SIZEOF_LONG;
  byte cksumtype = b[offset];
  offset += Bytes.SIZEOF_BYTE;
  long bytesPerChecksum = Bytes.toInt(b, offset);
  offset += Bytes.SIZEOF_INT;
  long onDiskDataSizeWithHeader = Bytes.toInt(b, offset);
  offset += Bytes.SIZEOF_INT;
  return " Header dump: magic: " + magic + " blockType " + bt + " compressedBlockSizeNoHeader "
      + compressedBlockSizeNoHeader + " uncompressedBlockSizeNoHeader "
      + uncompressedBlockSizeNoHeader + " prevBlockOffset " + prevBlockOffset + " checksumType "
      + ChecksumType.codeToType(cksumtype) + " bytesPerChecksum " + bytesPerChecksum
      + " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
}
Example 12: HFileBlock

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

/**
 * Creates a block from an existing buffer starting with a header. Rewinds
 * and takes ownership of the buffer. By definition of rewind, ignores the
 * buffer position, but if you slice the buffer beforehand, it will rewind
 * to that point. The reason this has a minorNumber and not a majorNumber is
 * because majorNumbers indicate the format of an HFile whereas minorNumbers
 * indicate the format inside an HFileBlock.
 */
HFileBlock(ByteBuffer b, boolean usesHBaseChecksum) throws IOException {
  b.rewind();
  blockType = BlockType.read(b);
  onDiskSizeWithoutHeader = b.getInt();
  uncompressedSizeWithoutHeader = b.getInt();
  prevBlockOffset = b.getLong();
  HFileContextBuilder contextBuilder = new HFileContextBuilder();
  contextBuilder.withHBaseCheckSum(usesHBaseChecksum);
  if (usesHBaseChecksum) {
    contextBuilder.withChecksumType(ChecksumType.codeToType(b.get()));
    contextBuilder.withBytesPerCheckSum(b.getInt());
    this.onDiskDataSizeWithHeader = b.getInt();
  } else {
    contextBuilder.withChecksumType(ChecksumType.NULL);
    contextBuilder.withBytesPerCheckSum(0);
    this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader +
        HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
  }
  this.fileContext = contextBuilder.build();
  buf = b;
  buf.rewind();
}
Example 13: toStringHeader

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

/**
 * Convert the contents of the block header into a human readable string.
 * This is mostly helpful for debugging. This assumes that the block
 * has minor version > 0.
 */
static String toStringHeader(ByteBuffer buf) throws IOException {
  byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)];
  buf.get(magicBuf);
  BlockType bt = BlockType.parse(magicBuf, 0, BlockType.MAGIC_LENGTH);
  int compressedBlockSizeNoHeader = buf.getInt();
  int uncompressedBlockSizeNoHeader = buf.getInt();
  long prevBlockOffset = buf.getLong();
  byte cksumtype = buf.get();
  long bytesPerChecksum = buf.getInt();
  long onDiskDataSizeWithHeader = buf.getInt();
  return " Header dump: magic: " + Bytes.toString(magicBuf) +
      " blockType " + bt +
      " compressedBlockSizeNoHeader " + compressedBlockSizeNoHeader +
      " uncompressedBlockSizeNoHeader " + uncompressedBlockSizeNoHeader +
      " prevBlockOffset " + prevBlockOffset +
      " checksumType " + ChecksumType.codeToType(cksumtype) +
      " bytesPerChecksum " + bytesPerChecksum +
      " onDiskDataSizeWithHeader " + onDiskDataSizeWithHeader;
}
Example 14: HFileContext

import org.apache.hadoop.hbase.util.ChecksumType; // import the required package/class

public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
    Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
    int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
    Encryption.Context cryptoContext) {
  this.usesHBaseChecksum = useHBaseChecksum;
  this.includesMvcc = includesMvcc;
  this.includesTags = includesTags;
  this.compressAlgo = compressAlgo;
  this.compressTags = compressTags;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.blocksize = blockSize;
  if (encoding != null) {
    this.encoding = encoding;
  }
  this.cryptoContext = cryptoContext;
}