This page collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.CacheStats, gathered to answer the common questions: what is CacheStats for, and how is it used?
CacheStats belongs to the org.apache.hadoop.hbase.io.hfile package. Twelve code examples are shown below, ordered by popularity.
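Before the examples, here is a minimal, self-contained sketch of how CacheStats is usually obtained and read. It follows the same (older) HBase API the examples below use, namely CacheConfig.getBlockCache(); only getHitCount() and getMissCount(), both of which appear in the examples, are relied on, and the hit ratio is computed by hand rather than assuming any extra accessor.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CacheStats;

public class CacheStatsDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Grab the stats of the configured block cache, as the test examples below do.
    CacheStats stats = new CacheConfig(conf).getBlockCache().getStats();
    long hits = stats.getHitCount();
    long misses = stats.getMissCount();
    long total = hits + misses;
    // Compute the ratio manually instead of assuming a helper method.
    double hitRatio = (total == 0) ? 0.0 : (double) hits / total;
    System.out.printf("hits=%d misses=%d hitRatio=%.2f%n", hits, misses, hitRatio);
  }
}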
Example 1: _testBlocksScanned
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
  Region r = createNewHRegion(table, START_KEY, END_KEY, TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  r.flush(true);

  CacheStats stats = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache().getStats();
  long before = stats.getHitCount() + stats.getMissCount();
  // Do simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);

  InternalScanner s = r.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  while (s.next(results));
  s.close();

  int expectResultSize = 'z' - 'a';
  assertEquals(expectResultSize, results.size());

  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE /
      (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength());
  Assert.assertEquals(2, kvPerBlock);

  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;

  assertEquals(expectIndexBlockRead + expectDataBlockRead,
      stats.getHitCount() + stats.getMissCount() - before);
}
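The arithmetic behind the final assertion: the scan returns 'z' - 'a' = 25 cells, and with the asserted two KeyValues per block the scanner must touch ceil(25 / 2) = 13 data blocks. The test then assumes one index-block access per data block, so the combined hit-plus-miss count should grow by exactly 13 + 13 = 26 over its starting value.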
Example 2: SingleSizeCache
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
/**
 * Default constructor. Specify the size of the blocks, number of blocks, and
 * the SlabCache this cache will be assigned to.
 *
 * @param blockSize the size of each block, in bytes
 * @param numBlocks the number of blocks of blockSize this cache will hold.
 * @param master the SlabCache this SingleSlabCache is assigned to.
 */
public SingleSizeCache(int blockSize, int numBlocks,
    SlabItemActionWatcher master) {
  this.blockSize = blockSize;
  this.numBlocks = numBlocks;
  backingStore = new Slab(blockSize, numBlocks);
  this.stats = new CacheStats();
  this.actionWatcher = master;
  this.size = new AtomicLong(CACHE_FIXED_OVERHEAD + backingStore.heapSize());
  this.timeSinceLastAccess = new AtomicLong();

  // This evictionListener is called whenever the cache automatically
  // evicts something.
  RemovalListener<BlockCacheKey, CacheablePair> listener =
      new RemovalListener<BlockCacheKey, CacheablePair>() {
        @Override
        public void onRemoval(
            RemovalNotification<BlockCacheKey, CacheablePair> notification) {
          if (!notification.wasEvicted()) {
            // Only process removals by eviction, not by replacement or
            // explicit removal.
            return;
          }
          CacheablePair value = notification.getValue();
          timeSinceLastAccess.set(System.nanoTime()
              - value.recentlyAccessed.get());
          stats.evict();
          doEviction(notification.getKey(), value);
        }
      };

  backingMap = CacheBuilder.newBuilder()
      .maximumSize(numBlocks - 1)
      .removalListener(listener)
      .<BlockCacheKey, CacheablePair>build()
      .asMap();
}
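A hedged usage sketch for this constructor. It assumes the slab-cache classes live in org.apache.hadoop.hbase.io.hfile.slab (true for the HBase versions that shipped SlabCache) and that getStats() is the accessor shown in Examples 11 and 12; passing null for the watcher is purely illustrative, since in real use the parent SlabCache registers itself.

import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.slab.SingleSizeCache;

public class SingleSizeCacheDemo {
  public static void main(String[] args) {
    // A slab of 1024 blocks, 64 KB each (~64 MB of backing storage).
    // null watcher: an assumption for this sketch; normally the owning
    // SlabCache observes insertions and evictions through this argument.
    SingleSizeCache cache = new SingleSizeCache(64 * 1024, 1024, null);
    CacheStats stats = cache.getStats();
    System.out.println("hits=" + stats.getHitCount()
        + " misses=" + stats.getMissCount());
  }
}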
Example 3: SlabCache
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
/**
 * Default constructor, creates an empty SlabCache.
 *
 * @param size Total size allocated to the SlabCache, in bytes.
 * @param avgBlockSize Average size of a block being cached.
 */
public SlabCache(long size, long avgBlockSize) {
  this.avgBlockSize = avgBlockSize;
  this.size = size;
  this.stats = new CacheStats();
  this.requestStats = new SlabStats();
  this.successfullyCachedStats = new SlabStats();
  backingStore = new ConcurrentHashMap<BlockCacheKey, SingleSizeCache>();
  sizer = new TreeMap<Integer, SingleSizeCache>();
  this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this),
      STAT_THREAD_PERIOD_SECS, STAT_THREAD_PERIOD_SECS, TimeUnit.SECONDS);
}
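Again a hedged sketch, under the same package assumption as above; only the two-argument constructor shown here and the CacheStats getters already used in Example 1 are relied on.

import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.slab.SlabCache;

public class SlabCacheDemo {
  public static void main(String[] args) {
    // 128 MB total capacity, sized for blocks that average 64 KB.
    SlabCache cache = new SlabCache(128L * 1024 * 1024, 64L * 1024);
    CacheStats stats = cache.getStats();
    System.out.println("hits=" + stats.getHitCount()
        + " misses=" + stats.getMissCount());
  }
}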
Example 4: _testBlocksScanned
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
  HRegion r = createNewHRegion(table, START_KEY, END_KEY,
      TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  r.flushcache();

  CacheStats stats = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache().getStats();
  long before = stats.getHitCount() + stats.getMissCount();
  // Do simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);

  InternalScanner s = r.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  while (s.next(results));
  s.close();

  int expectResultSize = 'z' - 'a';
  assertEquals(expectResultSize, results.size());

  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE /
      (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength());
  Assert.assertEquals(2, kvPerBlock);

  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;

  assertEquals(expectIndexBlockRead + expectDataBlockRead,
      stats.getHitCount() + stats.getMissCount() - before);
}
Example 5: SingleSizeCache
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
/**
 * Default constructor. Specify the size of the blocks, number of blocks, and
 * the SlabCache this cache will be assigned to.
 *
 * @param blockSize the size of each block, in bytes
 * @param numBlocks the number of blocks of blockSize this cache will hold.
 * @param master the SlabCache this SingleSlabCache is assigned to.
 */
public SingleSizeCache(int blockSize, int numBlocks,
    SlabItemActionWatcher master) {
  this.blockSize = blockSize;
  this.numBlocks = numBlocks;
  backingStore = new Slab(blockSize, numBlocks);
  this.stats = new CacheStats();
  this.actionWatcher = master;
  this.size = new AtomicLong(CACHE_FIXED_OVERHEAD + backingStore.heapSize());
  this.timeSinceLastAccess = new AtomicLong();

  // This evictionListener is called whenever the cache automatically
  // evicts something.
  RemovalListener<BlockCacheKey, CacheablePair> listener =
      new RemovalListener<BlockCacheKey, CacheablePair>() {
        @Override
        public void onRemoval(
            RemovalNotification<BlockCacheKey, CacheablePair> notification) {
          if (!notification.wasEvicted()) {
            // Only process removals by eviction, not by replacement or
            // explicit removal.
            return;
          }
          CacheablePair value = notification.getValue();
          timeSinceLastAccess.set(System.nanoTime()
              - value.recentlyAccessed.get());
          stats.evict();
          doEviction(notification.getKey(), value);
        }
      };

  backingMap = CacheBuilder.newBuilder()
      .maximumSize(numBlocks - 1)
      .removalListener(listener)
      .<BlockCacheKey, CacheablePair>build()
      .asMap();
}
Example 6: _testBlocksScanned
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
  HRegion r = createNewHRegion(table, START_KEY, END_KEY, TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  r.flush(true);

  CacheStats stats = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache().getStats();
  long before = stats.getHitCount() + stats.getMissCount();
  // Do simple test of getting one row only first.
  Scan scan = new Scan().withStartRow(Bytes.toBytes("aaa")).withStopRow(Bytes.toBytes("aaz"))
      .setReadType(Scan.ReadType.PREAD);
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);

  InternalScanner s = r.getScanner(scan);
  List<Cell> results = new ArrayList<>();
  while (s.next(results));
  s.close();

  int expectResultSize = 'z' - 'a';
  assertEquals(expectResultSize, results.size());

  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE /
      (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength());
  Assert.assertEquals(2, kvPerBlock);

  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;

  assertEquals(expectIndexBlockRead + expectDataBlockRead,
      stats.getHitCount() + stats.getMissCount() - before);
}
Example 7: SingleSizeCache
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
/**
 * Default constructor. Specify the size of the blocks, number of blocks, and
 * the SlabCache this cache will be assigned to.
 *
 * @param blockSize the size of each block, in bytes
 * @param numBlocks the number of blocks of blockSize this cache will hold.
 * @param master the SlabCache this SingleSlabCache is assigned to.
 */
public SingleSizeCache(int blockSize, int numBlocks,
    SlabItemActionWatcher master) {
  this.blockSize = blockSize;
  this.numBlocks = numBlocks;
  backingStore = new Slab(blockSize, numBlocks);
  this.stats = new CacheStats();
  this.actionWatcher = master;
  this.size = new AtomicLong(CACHE_FIXED_OVERHEAD + backingStore.heapSize());
  this.timeSinceLastAccess = new AtomicLong();

  // This evictionListener is called whenever the cache automatically
  // evicts something.
  MapEvictionListener<BlockCacheKey, CacheablePair> listener =
      new MapEvictionListener<BlockCacheKey, CacheablePair>() {
        @Override
        public void onEviction(BlockCacheKey key, CacheablePair value) {
          timeSinceLastAccess.set(System.nanoTime()
              - value.recentlyAccessed.get());
          stats.evict();
          doEviction(key, value);
        }
      };

  backingMap = new MapMaker().maximumSize(numBlocks - 1)
      .evictionListener(listener).makeMap();
}
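Note the difference from Examples 2 and 5: this variant targets an older Guava release, registering a MapEvictionListener through the since-removed MapMaker.maximumSize()/evictionListener() API, which fires only for evictions. The newer CacheBuilder code instead uses a RemovalListener and must explicitly filter on notification.wasEvicted(), because RemovalListener also fires for replacements and explicit removals.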
Example 8: OnHeapBlockCache
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
/**
 * Configurable constructor. Use this constructor if not using defaults.
 *
 * @param maxSize maximum size of this cache, in bytes
 * @param blockSize expected average size of blocks, in bytes
 * @param evictionThread whether to run evictions in a bg thread or not
 * @param mapInitialSize initial size of backing ConcurrentHashMap
 * @param mapLoadFactor initial load factor of backing ConcurrentHashMap
 * @param mapConcurrencyLevel initial concurrency factor for backing CHM
 * @param minFactor percentage of total size that eviction will evict until
 * @param acceptableFactor percentage of total size that triggers eviction
 * @param singleFactor percentage of total size for single-access blocks
 * @param multiFactor percentage of total size for multiple-access blocks
 * @param memoryFactor percentage of total size for in-memory blocks
 */
public OnHeapBlockCache(long maxSize, long blockSize, boolean evictionThread,
    int mapInitialSize, float mapLoadFactor, int mapConcurrencyLevel,
    float minFactor, float acceptableFactor,
    float singleFactor, float multiFactor, float memoryFactor) {
  if (singleFactor + multiFactor + memoryFactor != 1) {
    throw new IllegalArgumentException(
        "Single, multi, and memory factors should total 1.0");
  }
  if (minFactor >= acceptableFactor) {
    throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor");
  }
  if (minFactor >= 1.0f || acceptableFactor >= 1.0f) {
    throw new IllegalArgumentException("all factors must be < 1");
  }
  this.maxSize = maxSize;
  this.blockSize = blockSize;
  map = new ConcurrentHashMap<BlockCacheKey, CachedBlock>(mapInitialSize,
      mapLoadFactor, mapConcurrencyLevel);
  this.minFactor = minFactor;
  this.acceptableFactor = acceptableFactor;
  this.singleFactor = singleFactor;
  this.multiFactor = multiFactor;
  this.memoryFactor = memoryFactor;
  this.stats = new CacheStats();
  this.count = new AtomicLong(0);
  this.elements = new AtomicLong(0);
  this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
  this.size = new AtomicLong(this.overhead);
  if (evictionThread) {
    this.evictionThread = new EvictionThread(this);
    this.evictionThread.start(); // FindBugs SC_START_IN_CTOR
  } else {
    this.evictionThread = null;
  }
}
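A hedged construction sketch for this constructor. OnHeapBlockCache appears to be a project-local class rather than stock HBase, so its package is unknown and the import is omitted; the factor values below mirror the defaults HBase's LruBlockCache traditionally uses for the same knobs (an assumption), and they satisfy the three preconditions enforced above: the priority factors sum to exactly 1.0f, and minFactor < acceptableFactor < 1.

// Assumes OnHeapBlockCache is on the classpath; its package is project-specific.
OnHeapBlockCache cache = new OnHeapBlockCache(
    256L * 1024 * 1024, // maxSize: 256 MB of cache
    64L * 1024,         // blockSize: expected average block size, 64 KB
    true,               // evictionThread: run evictions on a background thread
    16, 0.75f, 16,      // backing ConcurrentHashMap: initial size, load factor, concurrency
    0.95f,              // minFactor: eviction shrinks the cache to 95% of capacity
    0.99f,              // acceptableFactor: eviction triggers above 99% of capacity
    0.25f, 0.50f, 0.25f // single / multi / in-memory shares; sum is exactly 1.0f
);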
Example 9: SlabCache
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
/**
 * Default constructor, creates an empty SlabCache.
 *
 * @param size Total size allocated to the SlabCache, in bytes.
 * @param avgBlockSize Average size of a block being cached.
 */
public SlabCache(long size, long avgBlockSize) {
  this.avgBlockSize = avgBlockSize;
  this.size = size;
  this.stats = new CacheStats();
  this.requestStats = new SlabStats();
  this.successfullyCachedStats = new SlabStats();
  backingStore = new ConcurrentHashMap<BlockCacheKey, SingleSizeCache>();
  sizer = new TreeMap<Integer, SingleSizeCache>();
  this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this),
      STAT_THREAD_PERIOD_SECS, STAT_THREAD_PERIOD_SECS, TimeUnit.SECONDS);
}
Example 10: getStats
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
@Override
public CacheStats getStats() {
  return cacheStats;
}
Example 11: getStats
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
@Override
public CacheStats getStats() {
  return this.stats;
}
Example 12: getStats
import org.apache.hadoop.hbase.io.hfile.CacheStats; // import the required package/class
public CacheStats getStats() {
  return this.stats;
}