Java BucketCache Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache. If you are wondering what the BucketCache class is for, how to use it, or what real-world usage looks like, the curated examples below should help.


The BucketCache class belongs to the org.apache.hadoop.hbase.io.hfile.bucket package. Fifteen code examples for the class are shown below, sorted by popularity by default.

Example 1: evictBlock

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
/**
 * Evict the block. If the block was evicted by the eviction process and a victim
 * handler is configured, the block is handed off to that handler (for example an
 * L2 BucketCache), since it may be read again later.
 * @param block the block to evict
 * @param evictedByEvictionProcess true if the given block is evicted by
 *          EvictionThread
 * @return the heap size of the evicted block
 */
protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) {
  map.remove(block.getCacheKey());
  updateSizeMetrics(block, true);
  long val = elements.decrementAndGet();
  if (LOG.isTraceEnabled()) {
    long size = map.size();
    assertCounterSanity(size, val);
  }
  stats.evicted(block.getCachedTime(), block.getCacheKey().isPrimary());
  if (evictedByEvictionProcess && victimHandler != null) {
    if (victimHandler instanceof BucketCache) {
      boolean wait = getCurrentSize() < acceptableSize();
      boolean inMemory = block.getPriority() == BlockPriority.MEMORY;
      ((BucketCache)victimHandler).cacheBlockWithWait(block.getCacheKey(), block.getBuffer(),
          inMemory, wait);
    } else {
      victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer());
    }
  }
  return block.heapSize();
}
 
Contributor: fengchen8086, Project: ditb, Lines: 30, Source: LruBlockCache.java

Example 2: doBucketCacheConfigTest

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
private void doBucketCacheConfigTest() {
  final int bcSize = 100;
  this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
  CacheConfig cc = new CacheConfig(this.conf);
  basicBlockCacheOps(cc, false, false);
  assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
  // TODO: Assert sizes allocated are right and proportions.
  CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache();
  BlockCache [] bcs = cbc.getBlockCaches();
  assertTrue(bcs[0] instanceof LruBlockCache);
  LruBlockCache lbc = (LruBlockCache)bcs[0];
  assertEquals(CacheConfig.getLruCacheSize(this.conf,
      ManagementFactory.getMemoryMXBean().getHeapMemoryUsage()), lbc.getMaxSize());
  assertTrue(bcs[1] instanceof BucketCache);
  BucketCache bc = (BucketCache)bcs[1];
  // getMaxSize comes back in bytes but we specified size in MB
  assertEquals(bcSize, bc.getMaxSize() / (1024 * 1024));
}
 
Contributor: fengchen8086, Project: ditb, Lines: 19, Source: TestCacheConfig.java
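The combined L1+L2 deploy that this test asserts can be reproduced outside of JUnit. Below is a minimal sketch against the same 1.x-era API used in these examples; the configuration keys are the HConstants fields referenced above, and the 100 MB size is only illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class BucketCacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Back the L2 tier with a 100 MB off-heap BucketCache.
    conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100); // interpreted as MB, as in the test above
    // With both keys set and combined mode left at its default, the CacheConfig
    // constructor wires up a CombinedBlockCache (LruBlockCache L1 + BucketCache L2).
    CacheConfig cc = new CacheConfig(conf);
    BlockCache cache = cc.getBlockCache();
    System.out.println("Block cache: " + cache.getClass().getSimpleName());
  }
}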

Example 3: getBlockCaches

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
private static List<BlockCache> getBlockCaches() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  List<BlockCache> blockcaches = new ArrayList<BlockCache>();
  // default
  blockcaches.add(new CacheConfig(conf).getBlockCache());

  // memory
  BlockCache lru = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, TEST_UTIL.getConfiguration());
  blockcaches.add(lru);

  // bucket cache
  FileSystem.get(conf).mkdirs(TEST_UTIL.getDataTestDir());
  int[] bucketSizes =
      { INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024, 128 * 1024 };
  // BucketCache(ioEngineName, capacity, blockSize, bucketSizes, writerThreads,
  //             writerQueueLen, persistencePath); "offheap" keeps cached blocks off the Java heap.
  BlockCache bucketcache =
      new BucketCache("offheap", 128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
  blockcaches.add(bucketcache);
  return blockcaches;
}
 
Contributor: fengchen8086, Project: ditb, Lines: 20, Source: TestCacheOnWrite.java

Example 4: getBlockCaches

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
private static List<BlockCache> getBlockCaches() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  List<BlockCache> blockcaches = new ArrayList<BlockCache>();
  // default
  blockcaches.add(new CacheConfig(conf).getBlockCache());

  // memory
  BlockCache lru = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, TEST_UTIL.getConfiguration());
  blockcaches.add(lru);

  // bucket cache
  FileSystem.get(conf).mkdirs(TEST_UTIL.getDataTestDir());
  int[] bucketSizes = {INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024 };
  BlockCache bucketcache =
      new BucketCache("file:" + TEST_UTIL.getDataTestDir() + "/bucket.data",
          128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
  blockcaches.add(bucketcache);
  return blockcaches;
}
 
Contributor: grokcoder, Project: pbase, Lines: 20, Source: TestCacheOnWrite.java

Example 5: doBucketCacheConfigTest

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
private void doBucketCacheConfigTest() {
  final int bcSize = 100;
  this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
  CacheConfig cc = new CacheConfig(this.conf);
  basicBlockCacheOps(cc, false, false);
  assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
  // TODO: Assert sizes allocated are right and proportions.
  CombinedBlockCache cbc = (CombinedBlockCache)cc.getBlockCache();
  BlockCache [] bcs = cbc.getBlockCaches();
  assertTrue(bcs[0] instanceof LruBlockCache);
  LruBlockCache lbc = (LruBlockCache)bcs[0];
  assertEquals(MemorySizeUtil.getOnHeapCacheSize(this.conf), lbc.getMaxSize());
  assertTrue(bcs[1] instanceof BucketCache);
  BucketCache bc = (BucketCache)bcs[1];
  // getMaxSize comes back in bytes but we specified size in MB
  assertEquals(bcSize, bc.getMaxSize() / (1024 * 1024));
}
 
Contributor: apache, Project: hbase, Lines: 18, Source: TestCacheConfig.java

Example 6: getBlockCaches

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
private static List<BlockCache> getBlockCaches() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  List<BlockCache> blockcaches = new ArrayList<>();
  // default
  blockcaches.add(new CacheConfig(conf).getBlockCache());

  //set LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME to 2.0f due to HBASE-16287
  TEST_UTIL.getConfiguration().setFloat(LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, 2.0f);
  // memory
  BlockCache lru = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, TEST_UTIL.getConfiguration());
  blockcaches.add(lru);

  // bucket cache
  FileSystem.get(conf).mkdirs(TEST_UTIL.getDataTestDir());
  int[] bucketSizes =
      { INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024, 128 * 1024 };
  BlockCache bucketcache =
      new BucketCache("offheap", 128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
  blockcaches.add(bucketcache);
  return blockcaches;
}
 
Contributor: apache, Project: hbase, Lines: 22, Source: TestCacheOnWrite.java

Example 7: instantiateBlockCache

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
/**
 * Returns the block cache or <code>null</code> in case none should be used.
 * Sets GLOBAL_BLOCK_CACHE_INSTANCE
 *
 * @param conf  The current configuration.
 * @return The block cache or <code>null</code>.
 */
public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
  if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
  if (blockCacheDisabled) return null;
  MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
  LruBlockCache l1 = getL1(conf, mu);
  // blockCacheDisabled is set as a side-effect of getL1(), so check it again after the call.
  if (blockCacheDisabled) return null;
  BucketCache l2 = getL2(conf, mu);
  if (l2 == null) {
    GLOBAL_BLOCK_CACHE_INSTANCE = l1;
  } else {
    boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY,
      DEFAULT_BUCKET_CACHE_COMBINED);
    if (combinedWithLru) {
      GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2);
    } else {
      // L1 and L2 are not 'combined'. They are connected via the LruBlockCache victim-handler
      // mechanism. It is a little ugly but works as follows: when the background eviction
      // thread runs, blocks evicted from L1 go to L2, and when a block requested from the L1
      // cache is not found there, L2 is searched as well.
      l1.setVictimCache(l2);
      GLOBAL_BLOCK_CACHE_INSTANCE = l1;
    }
  }
  return GLOBAL_BLOCK_CACHE_INSTANCE;
}
 
Contributor: grokcoder, Project: pbase, Lines: 34, Source: CacheConfig.java
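A hypothetical caller of the method above, exercising the non-combined L1/L2 deploy described in its else branch (the key names are the ones used by the tests in this article; the sizes are illustrative and the imports match the sketch after Example 2):

Configuration conf = HBaseConfiguration.create();
conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 512);            // 512 MB BucketCache for L2
conf.setBoolean(CacheConfig.BUCKET_CACHE_COMBINED_KEY, false); // keep L1 and L2 un-combined
BlockCache cache = CacheConfig.instantiateBlockCache(conf);
// With combining disabled, the returned cache is the LruBlockCache (L1) and the
// BucketCache (L2) is attached to it as the victim handler: blocks evicted from L1
// spill into L2, and a miss in L1 is retried against L2.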

Example 8: iterateBlockCache

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
private void iterateBlockCache(BlockCache cache, Iterator<CachedBlock> iterator) {
  int refCount;
  while (iterator.hasNext()) {
    CachedBlock next = iterator.next();
    BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
    if (cache instanceof BucketCache) {
      refCount = ((BucketCache) cache).getRefCount(cacheKey);
    } else if (cache instanceof CombinedBlockCache) {
      refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
    } else {
      continue;
    }
    assertEquals(0, refCount);
  }
}
 
Contributor: apache, Project: hbase, Lines: 16, Source: TestBlockEvictionFromClient.java
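A typical way this helper is driven from the test, assuming cacheConf is the CacheConfig of the table under test (the variable name is illustrative):

BlockCache cache = cacheConf.getBlockCache();
// Walk every cached block and assert that no reader still holds a reference to it.
iterateBlockCache(cache, cache.iterator());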

Example 9: testBucketCacheConfigL1L2Setup

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
/**
 * Assert that when BUCKET_CACHE_COMBINED_KEY is false (the non-default setting),
 * we deploy LruBlockCache as L1 with a BucketCache as L2.
 */
@Test (timeout=10000)
public void testBucketCacheConfigL1L2Setup() {
  this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
  // Make sure the LRU size is smaller than bcSize, so that when eviction from L1
  // happens it does not fail because L2 cannot take the evicted block (block too big).
  this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f);
  MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
  long lruExpectedSize = CacheConfig.getLruCacheSize(this.conf, mu);
  final int bcSize = 100;
  long bcExpectedSize = 100 * 1024 * 1024; // 100 MB, expressed in bytes.
  assertTrue(lruExpectedSize < bcExpectedSize);
  this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
  this.conf.setBoolean(CacheConfig.BUCKET_CACHE_COMBINED_KEY, false);
  CacheConfig cc = new CacheConfig(this.conf);
  basicBlockCacheOps(cc, false, false);
  assertTrue(cc.getBlockCache() instanceof LruBlockCache);
  // TODO: Assert sizes allocated are right and proportions.
  LruBlockCache lbc = (LruBlockCache)cc.getBlockCache();
  assertEquals(lruExpectedSize, lbc.getMaxSize());
  BlockCache bc = lbc.getVictimHandler();
  // getMaxSize comes back in bytes but we specified size in MB
  assertEquals(bcExpectedSize, ((BucketCache) bc).getMaxSize());
  // Test the L1+L2 deploy works as we'd expect with blocks evicted from L1 going to L2.
  long initialL1BlockCount = lbc.getBlockCount();
  long initialL2BlockCount = bc.getBlockCount();
  Cacheable c = new DataCacheEntry();
  BlockCacheKey bck = new BlockCacheKey("bck", 0);
  lbc.cacheBlock(bck, c, false, false);
  assertEquals(initialL1BlockCount + 1, lbc.getBlockCount());
  assertEquals(initialL2BlockCount, bc.getBlockCount());
  // Force evictions by putting in a block too big.
  final long justTooBigSize = lbc.acceptableSize() + 1;
  lbc.cacheBlock(new BlockCacheKey("bck2", 0), new DataCacheEntry() {
    @Override
    public long heapSize() {
      return justTooBigSize;
    }

    @Override
    public int getSerializedLength() {
      return (int)heapSize();
    }
  });
  // The eviction thread in lrublockcache needs to run.
  while (initialL1BlockCount != lbc.getBlockCount()) Threads.sleep(10);
  assertEquals(initialL1BlockCount, lbc.getBlockCount());
  long count = bc.getBlockCount();
  assertTrue(initialL2BlockCount + 1 <= count);
}
 
Contributor: fengchen8086, Project: ditb, Lines: 54, Source: TestCacheConfig.java

Example 10: setVictimCache

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
public void setVictimCache(BucketCache handler) {
  assert victimHandler == null;
  victimHandler = handler;
}
 
Contributor: grokcoder, Project: pbase, Lines: 5, Source: LruBlockCache.java

Example 11: getVictimHandler

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
BucketCache getVictimHandler() {
  return this.victimHandler;
}
 
Contributor: grokcoder, Project: pbase, Lines: 4, Source: LruBlockCache.java
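Examples 10 and 11 are the two ends of the victim-handler wiring. The sketch below wires an L1/L2 pair by hand using the LruBlockCache and BucketCache constructors shown in the earlier examples; the sizes are illustrative, the BucketCache constructor can throw IOException, and getVictimHandler() is package-private, so this code is assumed to live in org.apache.hadoop.hbase.io.hfile:

Configuration conf = HBaseConfiguration.create();
LruBlockCache l1 = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, conf);
int[] bucketSizes = { 64 * 1024, 128 * 1024 };
BucketCache l2 = new BucketCache("offheap", 256 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
l1.setVictimCache(l2);
// From now on, blocks evicted from l1 by its eviction thread are offered to l2.
assert l1.getVictimHandler() == l2;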

Example 12: CombinedBlockCache

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
public CombinedBlockCache(LruBlockCache lruCache, BucketCache bucketCache) {
  this.lruCache = lruCache;
  this.bucketCache = bucketCache;
  this.combinedCacheStats = new CombinedCacheStats(lruCache.getStats(),
      bucketCache.getStats());
}
 
Contributor: grokcoder, Project: pbase, Lines: 7, Source: CombinedBlockCache.java
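A hypothetical construction of the combined cache from the same building blocks (sizes are illustrative; imports as in the earlier sketches):

Configuration conf = HBaseConfiguration.create();
LruBlockCache onHeap = new LruBlockCache(32 * 1024 * 1024, 64 * 1024, conf);
BucketCache offHeap = new BucketCache("offheap", 256 * 1024 * 1024, 64 * 1024,
    new int[] { 64 * 1024, 128 * 1024 }, 5, 64 * 100, null);
CombinedBlockCache combined = new CombinedBlockCache(onHeap, offHeap);
// In the combined deploy, index/bloom (meta) blocks stay on heap in the LRU tier while
// data blocks are cached in the BucketCache; the stats of both tiers are exposed through
// the CombinedCacheStats created by the constructor above.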

Example 13: testBucketCacheConfigL1L2Setup

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
/**
 * Assert that when BUCKET_CACHE_COMBINED_KEY is false (the non-default setting),
 * we deploy LruBlockCache as L1 with a BucketCache as L2.
 */
@Test (timeout=10000)
public void testBucketCacheConfigL1L2Setup() {
  this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
  // Make sure the LRU size is smaller than bcSize, so that when eviction from L1
  // happens it does not fail because L2 cannot take the evicted block (block too big).
  this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f);
  MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
  long lruExpectedSize = CacheConfig.getLruCacheSize(this.conf, mu);
  final int bcSize = 100;
  long bcExpectedSize = 100 * 1024 * 1024; // 100 MB, expressed in bytes.
  assertTrue(lruExpectedSize < bcExpectedSize);
  this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
  this.conf.setBoolean(CacheConfig.BUCKET_CACHE_COMBINED_KEY, false);
  CacheConfig cc = new CacheConfig(this.conf);
  basicBlockCacheOps(cc, false, false);
  assertTrue(cc.getBlockCache() instanceof LruBlockCache);
  // TODO: Assert sizes allocated are right and proportions.
  LruBlockCache lbc = (LruBlockCache)cc.getBlockCache();
  assertEquals(lruExpectedSize, lbc.getMaxSize());
  BucketCache bc = lbc.getVictimHandler();
  // getMaxSize comes back in bytes but we specified size in MB
  assertEquals(bcExpectedSize, bc.getMaxSize());
  // Test the L1+L2 deploy works as we'd expect with blocks evicted from L1 going to L2.
  long initialL1BlockCount = lbc.getBlockCount();
  long initialL2BlockCount = bc.getBlockCount();
  Cacheable c = new DataCacheEntry();
  BlockCacheKey bck = new BlockCacheKey("bck", 0);
  lbc.cacheBlock(bck, c, false, false);
  assertEquals(initialL1BlockCount + 1, lbc.getBlockCount());
  assertEquals(initialL2BlockCount, bc.getBlockCount());
  // Force evictions by putting in a block too big.
  final long justTooBigSize = lbc.acceptableSize() + 1;
  lbc.cacheBlock(new BlockCacheKey("bck2", 0), new DataCacheEntry() {
    @Override
    public long heapSize() {
      return justTooBigSize;
    }

    @Override
    public int getSerializedLength() {
      return (int)heapSize();
    }
  });
  // The eviction thread in lrublockcache needs to run.
  while (initialL1BlockCount != lbc.getBlockCount()) Threads.sleep(10);
  assertEquals(initialL1BlockCount, lbc.getBlockCount());
  long count = bc.getBlockCount();
  assertTrue(initialL2BlockCount + 1 <= count);
}
 
Contributor: grokcoder, Project: pbase, Lines: 54, Source: TestCacheConfig.java

Example 14: getRefCount

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
@VisibleForTesting
public int getRefCount(BlockCacheKey cacheKey) {
  return (this.l2Cache instanceof BucketCache)
      ? ((BucketCache) this.l2Cache).getRefCount(cacheKey) : 0;
}
 
Contributor: apache, Project: hbase, Lines: 6, Source: CombinedBlockCache.java

Example 15: checkForBlockEviction

import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; // import the required package/class
private void checkForBlockEviction(BlockCache cache, boolean getClosed, boolean expectOnlyZero)
    throws InterruptedException {
  int counter = NO_OF_THREADS;
  if (CustomInnerRegionObserver.waitForGets.get()) {
    // Because only one row is selected, it has only 2 blocks
    counter = counter - 1;
    while (CustomInnerRegionObserver.countOfGets.get() < NO_OF_THREADS) {
      Thread.sleep(100);
    }
  } else {
    while (CustomInnerRegionObserver.countOfNext.get() < NO_OF_THREADS) {
      Thread.sleep(100);
    }
  }
  Iterator<CachedBlock> iterator = cache.iterator();
  int refCount = 0;
  while (iterator.hasNext()) {
    CachedBlock next = iterator.next();
    BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
    if (cache instanceof BucketCache) {
      refCount = ((BucketCache) cache).getRefCount(cacheKey);
    } else if (cache instanceof CombinedBlockCache) {
      refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
    } else {
      continue;
    }
    System.out.println(" the refcount is " + refCount + " block is " + cacheKey);
    if (CustomInnerRegionObserver.waitForGets.get()) {
      if (expectOnlyZero) {
        assertTrue(refCount == 0);
      }
      if (refCount != 0) {
        // Because the scan would also have touched these blocks,
        // it would have touched all 3.
        if (getClosed) {
          // If get has closed only the scan's blocks would be available
          assertEquals(refCount, CustomInnerRegionObserver.countOfGets.get());
        } else {
          assertEquals(refCount, CustomInnerRegionObserver.countOfGets.get() + (NO_OF_THREADS));
        }
      }
    } else {
      // Because the get would also have touched these blocks,
      // but only on 2 additional ones.
      if (expectOnlyZero) {
        assertTrue(refCount == 0);
      }
      if (refCount != 0) {
        if (getLatch == null) {
          assertEquals(refCount, CustomInnerRegionObserver.countOfNext.get());
        } else {
          assertEquals(refCount, CustomInnerRegionObserver.countOfNext.get() + (NO_OF_THREADS));
        }
      }
    }
  }
  CustomInnerRegionObserver.getCdl().get().countDown();
}
 
Contributor: apache, Project: hbase, Lines: 62, Source: TestBlockEvictionFromClient.java


Note: The org.apache.hadoop.hbase.io.hfile.bucket.BucketCache class examples in this article were compiled from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, and copyright in the source code remains with those authors. When reusing or distributing the code, please follow the License of the corresponding project; do not republish without permission.