This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFileBlock.getBufferReadOnly. If you are unsure what HFileBlock.getBufferReadOnly does, how to call it, or what working code looks like, the curated snippets below should help. You can also explore the containing class, org.apache.hadoop.hbase.io.hfile.HFileBlock, for related usage.
The following presents 6 code examples of the HFileBlock.getBufferReadOnly method, sorted by popularity by default.
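All six examples share the same pattern: read an HFileBlock through a reader, call getBufferReadOnly() to obtain a zero-copy view of the block, then skip past the header to reach the payload. The sketch below distills that pattern. It is an illustration only: reader, offset, and size are hypothetical placeholders, and the readBlock signature shown is just one of the several version-specific variants that appear in the examples below.

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;

// Sketch only: 'reader', 'offset', and 'size' are hypothetical placeholders;
// readBlock signatures differ between HBase versions (see the examples below).
HFileBlock block = reader.readBlock(offset, size,
    true,   // cacheBlock
    true,   // pread (positional read)
    false); // isCompaction

// The returned buffer still includes the block header, so the payload
// starts at headerSize() and spans getUncompressedSizeWithoutHeader() bytes.
ByteBuffer buf = block.getBufferReadOnly();
int payloadOffset = block.headerSize();
int payloadLength = block.getUncompressedSizeWithoutHeader();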
Example 1: contains
import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class that the method depends on
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;
  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK, null);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }
    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf, bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }
  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }
  return result;
}
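Note that this variant hands the read-only ByteBuffer directly to ByteBloomFilter.contains, together with bloomBlock.headerSize() as the offset into the buffer. The variants in Examples 2, 3, and 6 instead unwrap the backing array via bloomBuf.array() and bloomBuf.arrayOffset(), which assumes the buffer is heap-backed.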
Example 2: contains
import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class that the method depends on
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;
  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }
    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }
  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }
  return result;
}
Example 3: contains
import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class that the method depends on
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;
  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }
    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }
  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }
  return result;
}
Example 4: writeToCache
import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class that the method depends on
public BucketEntry writeToCache(final IOEngine ioEngine,
    final BucketAllocator bucketAllocator,
    final UniqueIndexMap<Integer> deserialiserMap,
    final LongAdder realCacheSize) throws CacheFullException, IOException,
    BucketAllocatorException {
  int len = data.getSerializedLength();
  // This cacheable thing can't be serialized
  if (len == 0) return null;
  long offset = bucketAllocator.allocateBlock(len);
  BucketEntry bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory);
  bucketEntry.setDeserialiserReference(data.getDeserializer(), deserialiserMap);
  try {
    if (data instanceof HFileBlock) {
      // If an instance of HFileBlock, save on some allocations.
      HFileBlock block = (HFileBlock) data;
      ByteBuff sliceBuf = block.getBufferReadOnly();
      ByteBuffer metadata = block.getMetaData();
      if (LOG.isTraceEnabled()) {
        LOG.trace("Write offset=" + offset + ", len=" + len);
      }
      ioEngine.write(sliceBuf, offset);
      ioEngine.write(metadata, offset + len - metadata.limit());
    } else {
      ByteBuffer bb = ByteBuffer.allocate(len);
      data.serialize(bb);
      ioEngine.write(bb, offset);
    }
  } catch (IOException ioe) {
    // free it in bucket allocator
    bucketAllocator.freeBlock(offset);
    throw ioe;
  }
  realCacheSize.add(len);
  return bucketEntry;
}
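A note on the two writes above: the serialized length len covers both the block bytes and the trailing metadata, so the block buffer is written at offset while the metadata buffer is placed at the tail of the same region, at offset + len - metadata.limit(). Writing the two pieces separately avoids re-serializing the HFileBlock into a fresh ByteBuffer, which is exactly the allocation the instanceof branch exists to save.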
Example 5: contains
import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class that the method depends on
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;
  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }
    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + HFileBlock.HEADER_SIZE,
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }
  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }
  return result;
}
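Unlike the other examples, this variant offsets into the buffer with the static HFileBlock.HEADER_SIZE constant rather than the instance method bloomBlock.headerSize(), which suggests it comes from an older HBase version in which every block header had a fixed size.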
Example 6: contains
import org.apache.hadoop.hbase.io.hfile.HFileBlock; // import the package/class that the method depends on
@Override
public boolean contains(byte[] key, int keyOffset, int keyLength,
    ByteBuffer bloom) {
  // We try to store the result in this variable so we can update stats for
  // testing, but when an error happens, we log a message and return.
  boolean result;
  int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
  if (block < 0) {
    result = false; // This key is not in the file.
  } else {
    HFileBlock bloomBlock;
    try {
      // We cache the block and use a positional read.
      bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
          index.getRootBlockDataSize(block), true, true, false, true,
          BlockType.BLOOM_CHUNK, null);
    } catch (IOException ex) {
      // The Bloom filter is broken, turn it off.
      throw new IllegalArgumentException(
          "Failed to load Bloom block for key "
              + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
    }
    ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
    result = ByteBloomFilter.contains(key, keyOffset, keyLength,
        bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
  }
  if (numQueriesPerChunk != null && block >= 0) {
    // Update statistics. Only used in unit tests.
    ++numQueriesPerChunk[block];
    if (result)
      ++numPositivesPerChunk[block];
  }
  return result;
}