

Java HeapSize Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.HeapSize. If you have been wondering what the HeapSize class is for, how to use it, or where to find real-world examples of it, the curated class examples below may help.


The HeapSize class belongs to the org.apache.hadoop.hbase.io package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
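Before diving into the examples, a brief orientation may help. HeapSize is a single-method interface: an implementor reports an approximate count of the heap bytes it occupies, which HBase uses for block-cache and memstore accounting. The following is a minimal sketch of an implementor; CachedValue is a hypothetical class (not part of HBase), and the accounting simply mirrors the ClassSize-based pattern visible in the examples below.

import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.util.ClassSize;

// Hypothetical implementor, shown only to illustrate the interface contract.
public class CachedValue implements HeapSize {
  private final byte[] payload;

  public CachedValue(byte[] payload) {
    this.payload = payload;
  }

  @Override
  public long heapSize() {
    // Object header plus one reference, aligned, plus the backing byte array.
    return ClassSize.align(ClassSize.OBJECT + ClassSize.REFERENCE)
        + ClassSize.align(ClassSize.ARRAY + payload.length);
  }
}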

Example 1: testHeapSizeChanges

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Just checks that the heap size grows when something is cached and shrinks
 * again when the same object is evicted.
 */
public static void testHeapSizeChanges(final BlockCache toBeTested,
    final int blockSize) {
  HFileBlockPair[] blocks = generateHFileBlocks(blockSize, 1);
  long heapSize = ((HeapSize) toBeTested).heapSize();
  toBeTested.cacheBlock(blocks[0].blockName, blocks[0].block);

  /* When we cache something, the heap size should always increase */
  assertTrue(heapSize < ((HeapSize) toBeTested).heapSize());

  toBeTested.evictBlock(blocks[0].blockName);

  /* Post-eviction, the heap size should be the same as before */
  assertEquals(heapSize, ((HeapSize) toBeTested).heapSize());
}
 
Author: fengchen8086, Project: ditb, Lines: 20, Source: CacheTestUtils.java
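A usage note: testHeapSizeChanges accepts any BlockCache whose concrete type also implements HeapSize (the utility casts it internally). A hypothetical invocation, reusing the two-argument LruBlockCache constructor that appears in the later examples, might look like this:

// Hypothetical invocation sketch; assumes CacheTestUtils is on the test classpath.
BlockCache cache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024); // 8 MB cache, 32 KB blocks
CacheTestUtils.testHeapSizeChanges(cache, 32 * 1024);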

Example 2: testEncodingWithCacheInternals

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);

  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 27, Source: TestHFileDataBlockEncoder.java

Example 3: testEncodingWithCache

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test putting blocks into the cache and taking them back out, using
 * different encoding options.
 */
@Test
public void testEncodingWithCache() {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024, TEST_UTIL.getConfiguration());
  HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getEncodingInCache() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 30, Source: TestHFileDataBlockEncoder.java

Example 4: testEncodingWithCacheInternals

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  HFileBlock block = getSampleHFileBlock(useTag);
  HFileBlock cacheBlock = createBlockOnDisk(block, useTag);

  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 26, Source: TestHFileDataBlockEncoder.java

Example 5: testHeapSize

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
@Test
public void testHeapSize() {
  Cell originalCell = CellUtil.createCell(Bytes.toBytes("row"), Bytes.toBytes("value"));
  final int fakeTagArrayLength = 10;
  Cell trCell = PrivateCellUtil.createCell(originalCell, new byte[fakeTagArrayLength]);

  // Get the heapSize before the internal tags array in trCell is nuked
  long trCellHeapSize = ((HeapSize)trCell).heapSize();

  // Make another TagRewriteCell with the original TagRewriteCell
  // This happens on systems with more than one RegionObserver/Coproc loaded (such as
  // VisibilityController and AccessController)
  Cell trCell2 = PrivateCellUtil.createCell(trCell, new byte[fakeTagArrayLength]);

  assertTrue("TagRewriteCell containing a TagRewriteCell's heapsize should be larger than a " +
      "single TagRewriteCell's heapsize", trCellHeapSize < ((HeapSize)trCell2).heapSize());
  assertTrue("TagRewriteCell should have had nulled out tags array", ((HeapSize)trCell).heapSize() <
      trCellHeapSize);
}
 
Author: apache, Project: hbase, Lines: 20, Source: TestTagRewriteCell.java

Example 6: testEncodingWithCacheInternals

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
  List<KeyValue> kvs = generator.generateTestKeyValues(60, useTag);
  HFileBlock block = getSampleHFileBlock(kvs, useTag);
  HFileBlock cacheBlock = createBlockOnDisk(kvs, block, useTag);

  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferReadOnly(), returnedBlock.getBufferReadOnly());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
 
Author: apache, Project: hbase, Lines: 26, Source: TestHFileDataBlockEncoder.java

Example 7: testEncodingWithCache

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test putting blocks into the cache and taking them back out, using
 * different encoding options.
 * @throws IOException
 */
@Test
public void testEncodingWithCache() throws IOException {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  HFileBlock cacheBlock = createBlockOnDisk(block);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getDataBlockEncoding() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
 
Author: cloud-software-foundation, Project: c5, Lines: 31, Source: TestHFileDataBlockEncoder.java

Example 8: testEncodingWithCache

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test putting blocks into the cache and taking them back out, using
 * different encoding options.
 */
@Test
public void testEncodingWithCache() {
  HFileBlock block = getSampleHFileBlock();
  LruBlockCache blockCache =
      new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
  HFileBlock cacheBlock = blockEncoder.diskToCacheFormat(block, false);
  BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
  blockCache.cacheBlock(cacheKey, cacheBlock);

  HeapSize heapSize = blockCache.getBlock(cacheKey, false, false);
  assertTrue(heapSize instanceof HFileBlock);

  HFileBlock returnedBlock = (HFileBlock) heapSize;

  if (blockEncoder.getEncodingInCache() ==
      DataBlockEncoding.NONE) {
    assertEquals(block.getBufferWithHeader(),
        returnedBlock.getBufferWithHeader());
  } else {
    if (BlockType.ENCODED_DATA != returnedBlock.getBlockType()) {
      System.out.println(blockEncoder);
    }
    assertEquals(BlockType.ENCODED_DATA, returnedBlock.getBlockType());
  }
}
 
Author: daidong, Project: DominoHBase, Lines: 30, Source: TestHFileDataBlockEncoder.java

Example 9: heapSize

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
@Override
public long heapSize() {
  long l2size = 0;
  if (l2Cache instanceof HeapSize) {
    l2size = ((HeapSize) l2Cache).heapSize();
  }
  return lruCache.heapSize() + l2size;
}
 
Author: fengchen8086, Project: ditb, Lines: 9, Source: CombinedBlockCache.java

Example 10: testPutAndCheckAndPutInParallel

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test written as a verifier for HBASE-7051, CheckAndPut should properly read
 * MVCC.
 *
 * Moved into TestAtomicOperation from its original location, TestHBase7051
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));

  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;

  region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<Cell>();
  ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build();
  scanner.next(results, scannerContext);
  for (Cell keyValue : results) {
    assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 40, Source: TestAtomicOperation.java

Example 11: testRedundantRowKeys

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
@Test
@SuppressWarnings("unchecked")
public void testRedundantRowKeys() throws Exception {

  final int batchSize = 100000;

  String tableName = getClass().getSimpleName();
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  MockHRegion region = (MockHRegion) TestHRegion.initHRegion(Bytes.toBytes(tableName), tableName, conf, Bytes.toBytes("a"));

  List<Pair<Mutation, Integer>> someBatch = Lists.newArrayList();
  int i = 0;
  while (i < batchSize) {
    if (i % 2 == 0) {
      someBatch.add(new Pair<Mutation, Integer>(new Put(Bytes.toBytes(0)), null));
    } else {
      someBatch.add(new Pair<Mutation, Integer>(new Put(Bytes.toBytes(1)), null));
    }
    i++;
  }
  long start = System.nanoTime();
  region.batchMutate(someBatch.toArray(new Pair[0]));
  long duration = System.nanoTime() - start;
  System.out.println("Batch mutate took: " + duration + "ns");
  assertEquals(2, region.getAcquiredLockCount());
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 28, Source: TestBatchHRegionLockingAndWrites.java

Example 12: testPutAndCheckAndPutInParallel

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TestHRegion.initHRegion(Bytes.toBytes(tableName),
      tableName, conf, Bytes.toBytes(family));

  List<Pair<Mutation, Integer>> putsAndLocks = Lists.newArrayList();
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;
  Pair<Mutation, Integer> pair = new Pair<Mutation, Integer>(puts[0], null);

  putsAndLocks.add(pair);

  region.batchMutate(putsAndLocks.toArray(new Pair[0]));
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<KeyValue> results = new ArrayList<KeyValue>();
  scanner.next(results, 2);
  for (KeyValue keyValue : results) {
    assertEquals("50",Bytes.toString(keyValue.getValue()));
  }

}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 38, Source: TestHBase7051.java

Example 13: testPutAndCheckAndPutInParallel

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test written as a verifier for HBASE-7051: CheckAndPut should properly read
 * MVCC.
 *
 * Moved into TestAtomicOperation from its original location, TestHBase7051
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {

  final String tableName = "testPutAndCheckAndPut";
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));

  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;
  
  region.batchMutate(puts);
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<Cell>();
  scanner.next(results, 2);
  for (Cell keyValue : results) {
    assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
  }

}
 
Author: grokcoder, Project: pbase, Lines: 40, Source: TestAtomicOperation.java

Example 14: heapSize

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
@Override
public long heapSize() {
  long l2size = 0;
  if (l2Cache instanceof HeapSize) {
    l2size = ((HeapSize) l2Cache).heapSize();
  }
  return onHeapCache.heapSize() + l2size;
}
 
Author: apache, Project: hbase, Lines: 9, Source: CombinedBlockCache.java

Example 15: testPutAndCheckAndPutInParallel

import org.apache.hadoop.hbase.io.HeapSize; // import the required package/class
/**
 * Test written as a verifier for HBASE-7051: CheckAndPut should properly read
 * MVCC.
 *
 * Moved into TestAtomicOperation from its original location, TestHBase7051
 */
@Test
public void testPutAndCheckAndPutInParallel() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()))
      .addFamily(new HColumnDescriptor(family));
  this.region = TEST_UTIL.createLocalHRegion(htd, null, null);
  Put[] puts = new Put[1];
  Put put = new Put(Bytes.toBytes("r1"));
  put.addColumn(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
  puts[0] = put;

  region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
  MultithreadedTestUtil.TestContext ctx =
    new MultithreadedTestUtil.TestContext(conf);
  ctx.addThread(new PutThread(ctx, region));
  ctx.addThread(new CheckAndPutThread(ctx, region));
  ctx.startThreads();
  while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
    Thread.sleep(100);
  }
  ctx.stop();
  Scan s = new Scan();
  RegionScanner scanner = region.getScanner(s);
  List<Cell> results = new ArrayList<>();
  ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build();
  scanner.next(results, scannerContext);
  for (Cell keyValue : results) {
    assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
  }
}
 
开发者ID:apache,项目名称:hbase,代码行数:38,代码来源:TestAtomicOperation.java


Note: The org.apache.hadoop.hbase.io.HeapSize class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many programmers; copyright in the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.