This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.BlockCacheKey. If you are wondering what BlockCacheKey is for and how to use it, the curated examples below should help.
The BlockCacheKey class belongs to the org.apache.hadoop.hbase.io.hfile package. 15 code examples of the class are shown below, sorted by popularity by default.
Example 1: CachedEntryQueue
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0) {
    initialSize++;
  }
  queue = MinMaxPriorityQueue.orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
    @Override
    public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
        Entry<BlockCacheKey, BucketEntry> entry2) {
      return BucketEntry.COMPARATOR.compare(entry1.getValue(), entry2.getValue());
    }
  }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
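The queue built here is a Guava MinMaxPriorityQueue, which keeps elements ordered under the supplied comparator and exposes both ends: peek()/poll() return the least element, so the entry most eligible for eviction always sits at the head. A minimal sketch, not HBase code, demonstrating that behavior (the Long values stand in for BucketEntry priorities):

import java.util.Comparator;
import com.google.common.collect.MinMaxPriorityQueue;

public class MinMaxQueueDemo {
  public static void main(String[] args) {
    // Same construction pattern as CachedEntryQueue: orderedBy -> expectedSize -> create.
    MinMaxPriorityQueue<Long> q = MinMaxPriorityQueue
        .orderedBy(Comparator.<Long>naturalOrder())
        .expectedSize(4)
        .create();
    q.add(30L);
    q.add(10L);
    q.add(20L);
    System.out.println(q.peek());     // 10 -> least element, first eviction candidate
    System.out.println(q.peekLast()); // 30 -> greatest element
  }
}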
Example 2: add
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Attempt to add the specified entry to this queue.
 * <p>
 * If the queue is smaller than the max size, or if the specified element is
 * ordered after the smallest element in the queue, the element will be added
 * to the queue. Otherwise, there is no side effect of this call.
 * @param entry a bucket entry with key to try to add to the queue
 */
public void add(Map.Entry<BlockCacheKey, BucketEntry> entry) {
  if (cacheSize < maxSize) {
    queue.add(entry);
    cacheSize += entry.getValue().getLength();
  } else {
    BucketEntry head = queue.peek().getValue();
    if (BucketEntry.COMPARATOR.compare(entry.getValue(), head) > 0) {
      cacheSize += entry.getValue().getLength();
      cacheSize -= head.getLength();
      if (cacheSize > maxSize) {
        queue.poll();
      } else {
        cacheSize += head.getLength();
      }
      queue.add(entry);
    }
  }
}
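The bookkeeping in the else branch is easy to misread: the head's length is subtracted tentatively, and only re-added if evicting the head turns out to be unnecessary. A minimal trace of the full-queue path with hypothetical sizes (not HBase code):

public class AddAccountingDemo {
  public static void main(String[] args) {
    long maxSize = 100;
    long cacheSize = 95;      // queue already at capacity
    long newEntryLen = 20;    // incoming entry that outranks the head
    long headLen = 10;        // head = least-ranked entry in the queue

    cacheSize += newEntryLen; // 115: tentatively count the new entry
    cacheSize -= headLen;     // 105: tentatively drop the head
    if (cacheSize > maxSize) {
      // Still over budget after the swap: queue.poll() really evicts the head.
      System.out.println("evict head, cacheSize=" + cacheSize);
    } else {
      cacheSize += headLen;   // the swap alone fits: keep the head after all
    }
    // In both branches the new entry is then added via queue.add(entry).
  }
}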
Example 3: evictBlocksByHfileName
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Evicts all blocks for a specific HFile.
 * <p>
 * This is used for evict-on-close to remove all blocks of a specific HFile.
 *
 * @return the number of blocks evicted
 */
@Override
public int evictBlocksByHfileName(String hfileName) {
  // Copy the list to avoid ConcurrentModificationException
  // as evictBlockKey removes the key from the index
  Set<BlockCacheKey> keySet = blocksByHFile.values(hfileName);
  if (keySet == null) {
    return 0;
  }
  int numEvicted = 0;
  List<BlockCacheKey> keysForHFile = ImmutableList.copyOf(keySet);
  for (BlockCacheKey key : keysForHFile) {
    if (evictBlock(key)) {
      ++numEvicted;
    }
  }
  return numEvicted;
}
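Iterating directly over blocksByHFile.values(hfileName) would fail here, because each evictBlock(key) call removes the key from that same index. Copying the keys first makes the iteration safe. A minimal sketch, not HBase code, of the same copy-then-iterate pattern:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SnapshotEvictionDemo {
  public static void main(String[] args) {
    Set<String> liveIndex = new HashSet<>(Arrays.asList("k1", "k2", "k3"));
    // Snapshot first, as the example does with ImmutableList.copyOf(keySet).
    List<String> snapshot = new ArrayList<>(liveIndex);
    for (String key : snapshot) {
      // Removing during a direct iteration over liveIndex would throw
      // ConcurrentModificationException; removing while walking the snapshot is safe.
      liveIndex.remove(key);
    }
    System.out.println(liveIndex.isEmpty()); // true
  }
}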
Example 4: setUp
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Set up variables and get BucketCache and WriterThread into a state where tests can manually
 * control the running of WriterThread and the BucketCache is empty.
 * @throws Exception
 */
@Before
public void setUp() throws Exception {
  // Arbitrary capacity.
  final int capacity = 16;
  // Run with one writer thread only, which means there will be one writer queue only too.
  // We depend on this below.
  final int writerThreadsCount = 1;
  this.bc = new MockBucketCache("heap", capacity, 1, new int[] { 1 }, writerThreadsCount,
      capacity, null, 100 /* Tolerate ioerrors for 100ms */);
  assertEquals(writerThreadsCount, bc.writerThreads.length);
  assertEquals(writerThreadsCount, bc.writerQueues.size());
  // Get a reference to our single WriterThread instance.
  this.wt = bc.writerThreads[0];
  this.q = bc.writerQueues.get(0);
  wt.disableWriter();
  this.plainKey = new BlockCacheKey("f", 0);
  this.plainCacheable = Mockito.mock(Cacheable.class);
  assertThat(bc.ramCache.isEmpty(), is(true));
  assertTrue(q.isEmpty());
}
Example 5: getBlock
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat) {
  CacheablePair contentBlock = backingMap.get(key);
  if (contentBlock == null) {
    if (!repeat) stats.miss(caching);
    return null;
  }
  stats.hit(caching);
  // If lock cannot be obtained, that means we're undergoing eviction.
  try {
    contentBlock.recentlyAccessed.set(System.nanoTime());
    synchronized (contentBlock) {
      if (contentBlock.serializedData == null) {
        // concurrently evicted
        LOG.warn("Concurrent eviction of " + key);
        return null;
      }
      return contentBlock.deserializer
          .deserialize(contentBlock.serializedData.asReadOnlyBuffer());
    }
  } catch (Throwable t) {
    LOG.error("Deserializer threw an exception. This may indicate a bug.", t);
    return null;
  }
}
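The synchronized block pairs with the eviction path: an evictor holding the same monitor can null out serializedData, so a reader that acquires the lock afterwards must re-check for null before deserializing. A stripped-down sketch of that read path (GuardedEntry here is hypothetical, not the HBase CacheablePair):

import java.nio.ByteBuffer;

class GuardedEntry {
  private ByteBuffer serializedData = ByteBuffer.allocate(64);

  ByteBuffer read() {
    synchronized (this) {
      if (serializedData == null) {
        return null; // concurrently evicted; the caller treats this as a miss
      }
      // asReadOnlyBuffer() gives each reader an independent position/limit,
      // so concurrent readers cannot corrupt each other's cursor state.
      return serializedData.asReadOnlyBuffer();
    }
  }

  void evict() {
    synchronized (this) {
      serializedData = null; // same monitor as read(), so the null check is reliable
    }
  }
}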
Example 6: cacheBlock
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Cache the block with the specified key and buffer. First finds what size
 * SingleSizeCache it should fit in. If the block doesn't fit in any, it will
 * return without doing anything.
 * <p>
 * It is assumed this will NEVER be called on an already cached block. If that
 * is done, it is assumed that you are reinserting the same exact block due to
 * a race condition, and will throw a runtime exception.
 *
 * @param cacheKey block cache key
 * @param cachedItem block buffer
 */
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem) {
  Entry<Integer, SingleSizeCache> scacheEntry = getHigherBlock(cachedItem
      .getSerializedLength());
  this.requestStats.addin(cachedItem.getSerializedLength());
  if (scacheEntry == null) {
    return; // we can't cache, something too big.
  }
  this.successfullyCachedStats.addin(cachedItem.getSerializedLength());
  SingleSizeCache scache = scacheEntry.getValue();
  /*
   * This will throw a runtime exception if we try to cache the same value
   * twice
   */
  scache.cacheBlock(cacheKey, cachedItem);
}
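getHigherBlock presumably resolves the smallest slab class that can hold the serialized block; a NavigableMap lookup of the following shape would do it. A sketch under that assumption, with the slab values stubbed as strings rather than SingleSizeCache instances:

import java.util.Map;
import java.util.TreeMap;

public class SlabLookupDemo {
  static final TreeMap<Integer, String> slabs = new TreeMap<>();

  static Map.Entry<Integer, String> getHigherBlock(int serializedLength) {
    return slabs.ceilingEntry(serializedLength); // least slab size >= the block size
  }

  public static void main(String[] args) {
    slabs.put(8 * 1024, "8K slab");
    slabs.put(64 * 1024, "64K slab");
    System.out.println(getHigherBlock(5_000).getValue()); // 8K slab
    System.out.println(getHigherBlock(100_000));          // null -> too big, not cached
  }
}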
Example 7: getBlock
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Get the buffer of the block with the specified name.
 * @param key block cache key
 * @param caching true if the caller caches blocks on cache misses
 * @param repeat whether this is a repeat lookup for the same block
 *
 * @return buffer of specified block name, or null if not in cache
 */
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat) {
  SingleSizeCache cachedBlock = backingStore.get(key);
  if (cachedBlock == null) {
    if (!repeat) stats.miss(caching);
    return null;
  }
  Cacheable contentBlock = cachedBlock.getBlock(key, caching, false);
  if (contentBlock != null) {
    stats.hit(caching);
  } else {
    if (!repeat) stats.miss(caching);
  }
  return contentBlock;
}
Example 8: CachedEntryQueue
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0) {
    initialSize++;
  }
  queue = MinMaxPriorityQueue
      .orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
        @Override
        public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
            Entry<BlockCacheKey, BucketEntry> entry2) {
          return entry1.getValue().compareTo(entry2.getValue());
        }
      }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
Example 9: add
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Attempt to add the specified entry to this queue.
 * <p>
 * If the queue is smaller than the max size, or if the specified element is
 * ordered after the smallest element in the queue, the element will be added
 * to the queue. Otherwise, there is no side effect of this call.
 * @param entry a bucket entry with key to try to add to the queue
 */
public void add(Map.Entry<BlockCacheKey, BucketEntry> entry) {
  if (cacheSize < maxSize) {
    queue.add(entry);
    cacheSize += entry.getValue().getLength();
  } else {
    BucketEntry head = queue.peek().getValue();
    if (entry.getValue().compareTo(head) > 0) {
      cacheSize += entry.getValue().getLength();
      cacheSize -= head.getLength();
      if (cacheSize > maxSize) {
        queue.poll();
      } else {
        cacheSize += head.getLength();
      }
      queue.add(entry);
    }
  }
}
Example 10: evictBlocksByHfileName
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Evicts all blocks for a specific HFile.
 * <p>
 * This is used for evict-on-close to remove all blocks of a specific HFile.
 *
 * @return the number of blocks evicted
 */
@Override
public int evictBlocksByHfileName(String hfileName) {
  // Copy the list to avoid ConcurrentModificationException
  // as evictBlockKey removes the key from the index
  Set<BlockCacheKey> keySet = blocksByHFile.values(hfileName);
  if (keySet == null) {
    return 0;
  }
  int numEvicted = 0;
  List<BlockCacheKey> keysForHFile = ImmutableList.copyOf(keySet);
  for (BlockCacheKey key : keysForHFile) {
    if (evictBlock(key)) {
      ++numEvicted;
    }
  }
  return numEvicted;
}
Example 11: getBlock
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Get the buffer of the block with the specified name.
 *
 * @return buffer of specified block name, or null if not in cache
 */
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
    boolean updateCacheMetrics) {
  SingleSizeCache cachedBlock = backingStore.get(key);
  if (cachedBlock == null) {
    if (!repeat) stats.miss(caching);
    return null;
  }
  Cacheable contentBlock = cachedBlock.getBlock(key, caching, false, updateCacheMetrics);
  if (contentBlock != null) {
    if (updateCacheMetrics) stats.hit(caching);
  } else if (!repeat) {
    if (updateCacheMetrics) stats.miss(caching);
  }
  return contentBlock;
}
Example 12: evictBlocksByHfileName
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Evicts all blocks for a specific HFile.
 * <p>
 * This is used for evict-on-close to remove all blocks of a specific HFile.
 *
 * @return the number of blocks evicted
 */
@Override
public int evictBlocksByHfileName(String hfileName) {
  Set<BlockCacheKey> keySet = blocksByHFile.subSet(
      new BlockCacheKey(hfileName, Long.MIN_VALUE), true,
      new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
  int numEvicted = 0;
  for (BlockCacheKey key : keySet) {
    if (evictBlock(key)) {
      ++numEvicted;
    }
  }
  return numEvicted;
}
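No snapshot copy is needed in this variant: blocksByHFile here is a sorted set of keys ordered by file name and then offset (presumably a concurrent sorted set, whose iterators tolerate removal), so every block of one HFile occupies a contiguous key range that subSet carves out directly. A minimal sketch, not HBase code, with a hypothetical Key class standing in for BlockCacheKey:

import java.util.Comparator;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListSet;

public class SubSetEvictionDemo {
  static class Key {
    final String hfileName;
    final long offset;
    Key(String hfileName, long offset) {
      this.hfileName = hfileName;
      this.offset = offset;
    }
  }

  public static void main(String[] args) {
    NavigableSet<Key> blocksByHFile = new ConcurrentSkipListSet<>(
        Comparator.comparing((Key k) -> k.hfileName).thenComparingLong(k -> k.offset));
    blocksByHFile.add(new Key("a", 0L));
    blocksByHFile.add(new Key("a", 4096L));
    blocksByHFile.add(new Key("b", 0L));
    // All of file "a"'s blocks lie between offsets Long.MIN_VALUE and Long.MAX_VALUE.
    NavigableSet<Key> forA = blocksByHFile.subSet(
        new Key("a", Long.MIN_VALUE), true,
        new Key("a", Long.MAX_VALUE), true);
    System.out.println(forA.size()); // 2 -> only file "a"'s blocks
  }
}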
Example 13: setUp
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Set up variables and get BucketCache and WriterThread into a state where tests can manually
 * control the running of WriterThread and the BucketCache is empty.
 * @throws Exception
 */
@Before
public void setUp() throws Exception {
  // Arbitrary capacity.
  final int capacity = 16;
  // Run with one writer thread only, which means there will be one writer queue only too.
  // We depend on this below.
  final int writerThreadsCount = 1;
  this.bc = new MockBucketCache("offheap", capacity, 1, new int[] { 1 }, writerThreadsCount,
      capacity, null, 100 /* Tolerate ioerrors for 100ms */);
  assertEquals(writerThreadsCount, bc.writerThreads.length);
  assertEquals(writerThreadsCount, bc.writerQueues.size());
  // Get a reference to our single WriterThread instance.
  this.wt = bc.writerThreads[0];
  this.q = bc.writerQueues.get(0);
  wt.disableWriter();
  this.plainKey = new BlockCacheKey("f", 0);
  this.plainCacheable = Mockito.mock(Cacheable.class);
  assertThat(bc.ramCache.isEmpty(), is(true));
  assertTrue(q.isEmpty());
}
Example 14: getBlock
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching) {
  CacheablePair contentBlock = backingMap.get(key);
  if (contentBlock == null) {
    stats.miss(caching);
    return null;
  }
  stats.hit(caching);
  // If lock cannot be obtained, that means we're undergoing eviction.
  try {
    contentBlock.recentlyAccessed.set(System.nanoTime());
    synchronized (contentBlock) {
      if (contentBlock.serializedData == null) {
        // concurrently evicted
        LOG.warn("Concurrent eviction of " + key);
        return null;
      }
      return contentBlock.deserializer
          .deserialize(contentBlock.serializedData.asReadOnlyBuffer());
    }
  } catch (Throwable t) {
    LOG.error("Deserializer threw an exception. This may indicate a bug.", t);
    return null;
  }
}
Example 15: evictBlocksByHfileName
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; // import the required package/class
/**
 * Evicts all blocks for a specific HFile.
 * <p>
 * This is used for evict-on-close to remove all blocks of a specific HFile.
 *
 * @return the number of blocks evicted
 */
@Override
public int evictBlocksByHfileName(String hfileName) {
  // Copy the list to avoid ConcurrentModificationException
  // as evictBlockKey removes the key from the index
  Set<BlockCacheKey> keySet = blocksByHFile.values(hfileName);
  if (keySet == null) {
    return 0;
  }
  int numEvicted = 0;
  List<BlockCacheKey> keysForHFile = ImmutableList.copyOf(keySet);
  for (BlockCacheKey key : keysForHFile) {
    if (evictBlock(key)) {
      ++numEvicted;
    }
  }
  return numEvicted;
}