This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.HeapSize. If you are unsure what the HeapSize class is for or how to use it, the examples selected below should help.
The HeapSize class belongs to the org.apache.hadoop.hbase.io package. Fifteen code examples of the class are shown below, sorted by popularity by default.
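For context, HeapSize is a tiny interface: an object that can report (approximately) how much JVM heap it occupies does so through a single heapSize() method. Below is a minimal sketch of an implementer; the MyCacheEntry class and its size arithmetic are illustrative assumptions for this article, not HBase code, though the ClassSize utility used for the math is HBase's real sizing helper.

import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.util.ClassSize;

// Hypothetical class for illustration; HeapSize itself declares only heapSize().
public class MyCacheEntry implements HeapSize {
  private final byte[] payload;

  public MyCacheEntry(byte[] payload) {
    this.payload = payload;
  }

  @Override
  public long heapSize() {
    // Object header plus one reference, plus the byte array itself,
    // aligned the way HBase's ClassSize utility aligns allocations.
    return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE)
        + ClassSize.align(ClassSize.ARRAY + payload.length);
  }
}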
Example 1: testHeapSizeChanges
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Just checks that heapSize grows when something is cached, and gets smaller
 * when the same object is evicted.
 */
public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);
  /* When we cache something, the heap size should always increase. */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());
  toBeTested.evictBlock(blocks[0].blockName);
  /* Post-eviction, the heap size should be the same as before. */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
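A hedged usage sketch for the helper above: in HBase's tests a helper like this lives in CacheTestUtils, and it can be pointed at any BlockCache whose implementation also implements HeapSize. The test class name and the constructor arguments below are illustrative values, not required ones.

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.junit.Test;

public class TestLruHeapSizeChanges {
  @Test
  public void heapSizeGrowsOnCacheAndShrinksOnEvict() {
    int blockSize = 32 * 1024;
    // An 8 MB LRU cache with 32 KB blocks; both values are illustrative.
    BlockCache cache = new LruBlockCache(8 * 1024 * 1024, blockSize);
    // Assumes the helper from Example 1 is the one in CacheTestUtils.
    CacheTestUtils.testHeapSizeChanges(cache, blockSize);
  }
}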
Example 2: testEncodingWithCacheInternals
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);
  LruBlockCache blockCache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);
  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);
  HFileBlock returnedBlock = (HFileBlock) heapSize;
  if (blockEncoder.getDataBlockEncoding() == DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(), returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Example 3: testEncodingWithCache
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test putting blocks into, and taking them back out of, the cache with
 * different encoding options.
 */
@Test
public void testEncodingWithCache() {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024, TEST_UTIL.getConfiguration());
  HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);
  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);
  HFileBlock returnedBlock = (HFileBlock) heapSize;
  if (blockEncoder.getEncodingInCache() == DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(), returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Example 4: testEncodingWithCacheInternals
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  HFileBlock block = getSampleHFileBlock(useTag);
  HFileBlock cacheBlock = createBlockOnDisk(block, useTag);
  LruBlockCache blockCache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);
  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);
  HFileBlock returnedBlock = (HFileBlock) heapSize;
  if (blockEncoder.getDataBlockEncoding() == DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(), returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Example 5: testHeapSize
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
@Test
public void testHeapSize() {
  Cell originalCell = CellUtil.createCell(Bytes.toBytes("row"), Bytes.toBytes("value"));
  final int fakeTagArrayLength = 10;
  Cell trCell = PrivateCellUtil.createCell(originalCell, new byte[fakeTagArrayLength]);
  // Get the heapSize before the internal tags array in trCell is nuked
  long trCellHeapSize = ((HeapSize) trCell).heapSize();
  // Make another TagRewriteCell with the original TagRewriteCell.
  // This happens on systems with more than one RegionObserver/Coproc loaded
  // (such as VisibilityController and AccessController).
  Cell trCell2 = PrivateCellUtil.createCell(trCell, new byte[fakeTagArrayLength]);
  assertTrue("TagRewriteCell containing a TagRewriteCell's heapsize should be larger than a "
      + "single TagRewriteCell's heapsize", trCellHeapSize < ((HeapSize) trCell2).heapSize());
  assertTrue("TagRewriteCell should have had nulled out tags array",
      ((HeapSize) trCell).heapSize() < trCellHeapSize);
}
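The second assertion reflects what the test's own comment hints at: when a TagRewriteCell wraps another TagRewriteCell, the inner cell's tags array is apparently nulled out so the same tags are not retained (or counted) twice, which is why the inner cell's reported heapSize shrinks after the wrap.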
Example 6: testEncodingWithCacheInternals
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);
  LruBlockCache blockCache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);
  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);
  HFileBlock returnedBlock = (HFileBlock) heapSize;
  if (blockEncoder.getDataBlockEncoding() == DataBlockEncoding.NONE) {
    assertEquals(block.getBufferReadOnly(), returnedBlock.getBufferReadOnly());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Example 7: testEncodingWithCache
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test putting blocks into, and taking them back out of, the cache with
 * different encoding options.
 * @throws IOException
 */
@Test
public void testEncodingWithCache() throws IOException {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  HFileBlock cacheBlock = createBlockOnDisk(block);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);
  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);
  HFileBlock returnedBlock = (HFileBlock) heapSize;
  if (blockEncoder.getDataBlockEncoding() == DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(), returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Example 8: testEncodingWithCache
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test putting blocks into, and taking them back out of, the cache with
 * different encoding options.
 */
@Test
public void testEncodingWithCache() {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);
  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);
  HFileBlock returnedBlock = (HFileBlock) heapSize;
  if (blockEncoder.getEncodingInCache() == DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(), returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
Example 9: heapSize
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
@Override
public long heapSize() {
  long l2size = 0;
  if (l2Cache instanceof HeapSize) {
    l2size = ((HeapSize) l2Cache).heapSize();
  }
  return lruCache.heapSize() + l2size;
}
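The instanceof guard is the point of interest here: as the code itself implies, a delegate cache is not required to implement HeapSize (an L2 cache may hold its data off-heap, for example), so this combined cache only adds in the L2 size when the delegate can actually report one.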
Example 10: testPutAndCheckAndPutInParallel
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test written as a verifier for HBASE-7051: CheckAndPut should properly read
 * MVCC.
 *
 * Moved into TestAtomicOperation from its original location, TestHBase7051.
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {
  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;
  region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
  MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<Cell>();
  ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build();
  scanner.next(results, scannerContext);
  for (Cell keyValue : results) {
    assertEquals("50", Bytes.toString(CellUtil.cloneValue(keyValue)));
  }
}
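A note on why HeapSize appears in this test at all: conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class) registers the mock region implementation, and Hadoop's Configuration.setClass verifies that the supplied class is assignable to the interface passed as the third argument. HRegion implements HeapSize, so HeapSize serves as the type bound for pluggable region implementations here and in the similar tests below.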
Example 11: testRedundantRowKeys
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
@Test
@SuppressWarnings("unchecked")
public void testRedundantRowKeys() throws Exception {
  final int batchSize = 100000;
  String tableName = getClass().getSimpleName();
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  MockHRegion region = (MockHRegion) TestHRegion.initHRegion(Bytes.toBytes(tableName),
      tableName, conf, Bytes.toBytes("a"));
  List<Pair<Mutation, Integer>> someBatch = Lists.newArrayList();
  int i = 0;
  while (i < batchSize) {
    if (i % 2 == 0) {
      someBatch.add(new Pair<Mutation, Integer>(new Put(Bytes.toBytes(0)), null));
    } else {
      someBatch.add(new Pair<Mutation, Integer>(new Put(Bytes.toBytes(1)), null));
    }
    i++;
  }
  long start = System.nanoTime();
  region.batchMutate(someBatch.toArray(new Pair[0]));
  long duration = System.nanoTime() - start;
  System.out.println("Batch mutate took: " + duration + "ns");
  assertEquals(2, region.getAcquiredLockCount());
}
Example 12: testPutAndCheckAndPutInParallel
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {
  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TestHRegion.initHRegion(Bytes.toBytes(tableName),
      tableName, conf, Bytes.toBytes(family));
  List<Pair<Mutation, Integer>> putsAndLocks = Lists.newArrayList();
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;
  Pair<Mutation, Integer> pair = new Pair<Mutation, Integer>(puts[0], null);
  putsAndLocks.add(pair);
  region.batchMutate(putsAndLocks.toArray(new Pair[0]));
  MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<KeyValue> results = new ArrayList<KeyValue>();
  scanner.next(results, 2);
  for (KeyValue keyValue : results) {
    assertEquals("50", Bytes.toString(keyValue.getValue()));
  }
}
Example 13: testPutAndCheckAndPutInParallel
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test written as a verifier for HBASE-7051: CheckAndPut should properly read
 * MVCC.
 *
 * Moved into TestAtomicOperation from its original location, TestHBase7051.
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {
  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;
  region.batchMutate(puts);
  MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<Cell>();
  scanner.next(results, 2);
  for (Cell keyValue : results) {
    assertEquals("50", Bytes.toString(CellUtil.cloneValue(keyValue)));
  }
}
Example 14: heapSize
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
@Override
public long heapSize() {
  long l2size = 0;
  if (l2Cache instanceof HeapSize) {
    l2size = ((HeapSize) l2Cache).heapSize();
  }
  return onHeapCache.heapSize() + l2size;
}
Example 15: testPutAndCheckAndPutInParallel
import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test written as a verifier for HBASE-7051: CheckAndPut should properly read
 * MVCC.
 *
 * Moved into TestAtomicOperation from its original location, TestHBase7051.
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()))
      .addFamily(new HColumnDescriptor(family));
  this.region = TEST_UTIL.createLocalHRegion(htd, null, null);
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.addColumn(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;
  region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
  MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<>();
  ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build();
  scanner.next(results, scannerContext);
  for (Cell keyValue : results) {
    assertEquals("50", Bytes.toString(CellUtil.cloneValue(keyValue)));
  }
}