本文整理汇总了Java中org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory类的典型用法代码示例。如果您正苦于以下问题:Java BlockCategory类的具体用法?Java BlockCategory怎么用?Java BlockCategory使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
BlockCategory类属于org.apache.hadoop.hbase.io.hfile.BlockType包,在下文中一共展示了BlockCategory类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: shouldReadBlockFromCache
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
* Return true if we may find this type of block in block cache.
* <p/>
* TODO: today {@code family.isBlockCacheEnabled()} only means {@code cacheDataOnRead}, so here we
* consider lots of other configurations such as {@code cacheDataOnWrite}. We should fix this in
* the future, {@code cacheDataOnWrite} should honor the CF level {@code isBlockCacheEnabled}
* configuration.
*/
/**
 * Returns true if a block of the given type may be present in the block cache,
 * so the read path knows whether a cache lookup is worthwhile.
 * <p/>
 * TODO: today {@code family.isBlockCacheEnabled()} only means {@code cacheDataOnRead}, so here we
 * consider lots of other configurations such as {@code cacheDataOnWrite}. We should fix this in
 * the future, {@code cacheDataOnWrite} should honor the CF level {@code isBlockCacheEnabled}
 * configuration.
 *
 * @param blockType the type of the block being read; null means "unknown, assume cached"
 * @return true if the cache should be consulted for this block type
 */
public boolean shouldReadBlockFromCache(BlockType blockType) {
  if (!isBlockCacheEnabled()) {
    return false;
  }
  // Any of these settings means data blocks may already be cached.
  if (cacheDataOnRead || prefetchOnOpen || cacheDataOnWrite) {
    return true;
  }
  // With no type information we cannot rule the cache out.
  if (blockType == null) {
    return true;
  }
  // Index and bloom blocks are always cached when the cache is enabled.
  BlockCategory category = blockType.getCategory();
  return category == BlockCategory.BLOOM || category == BlockCategory.INDEX;
}
示例2: shouldReadBlockFromCache
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
* Return true if we may find this type of block in block cache.
* <p>
* TODO: today {@code family.isBlockCacheEnabled()} only means {@code cacheDataOnRead}, so here we
* consider lots of other configurations such as {@code cacheDataOnWrite}. We should fix this in
* the future, {@code cacheDataOnWrite} should honor the CF level {@code isBlockCacheEnabled}
* configuration.
*/
/**
 * Tells the reader whether it is worth looking in the block cache for a block
 * of this type at all.
 * <p>
 * TODO: today {@code family.isBlockCacheEnabled()} only means {@code cacheDataOnRead}, so here we
 * consider lots of other configurations such as {@code cacheDataOnWrite}. We should fix this in
 * the future, {@code cacheDataOnWrite} should honor the CF level {@code isBlockCacheEnabled}
 * configuration.
 *
 * @param blockType type of the block, or null when the type is not known
 * @return true if this type of block may be found in the block cache
 */
public boolean shouldReadBlockFromCache(BlockType blockType) {
  if (!isBlockCacheEnabled()) {
    return false;
  }
  // Any of these settings implies data blocks may have been cached; an
  // unknown type is treated optimistically.
  boolean mayCacheData = cacheDataOnRead || prefetchOnOpen || cacheDataOnWrite;
  if (mayCacheData || blockType == null) {
    return true;
  }
  switch (blockType.getCategory()) {
    case BLOOM:
    case INDEX:
      // Index/bloom blocks are cached whenever the cache is on.
      return true;
    default:
      return false;
  }
}
示例3: shouldCacheBlockOnRead
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
* Should we cache a block of a particular category? We always cache
* important blocks such as index blocks, as long as the block cache is
* available.
*/
/**
 * Decides whether a block of the given category should be cached when read.
 * Important blocks (index, bloom) are always cached as long as the block
 * cache is available; other categories depend on configuration.
 *
 * @param category the category of the block that was just read
 * @return true if the block should be inserted into the cache
 */
public boolean shouldCacheBlockOnRead(BlockCategory category) {
  if (!isBlockCacheEnabled()) {
    return false;
  }
  if (cacheDataOnRead) {
    return true;
  }
  // Index and bloom blocks are always worth caching.
  if (category == BlockCategory.INDEX || category == BlockCategory.BLOOM) {
    return true;
  }
  // Prefetch-on-open caches everything except META and UNKNOWN blocks.
  return prefetchOnOpen
      && category != BlockCategory.META
      && category != BlockCategory.UNKNOWN;
}
示例4: shouldCacheCompressed
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
* @return true if this {@link BlockCategory} should be compressed in blockcache, false otherwise
*/
/**
 * @param category the category of the block in question
 * @return true if this {@link BlockCategory} should be compressed in blockcache, false otherwise
 */
public boolean shouldCacheCompressed(BlockCategory category) {
  if (!isBlockCacheEnabled()) {
    return false;
  }
  // Only DATA blocks are ever cached in compressed form.
  switch (category) {
    case DATA:
      return this.cacheDataCompressed;
    default:
      return false;
  }
}
示例5: cacheBlock
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
@Override
/**
 * Caches the given block, routing it between the two cache tiers: non-DATA
 * (meta) blocks always go to the on-heap LRU cache, while DATA blocks go to
 * the L2 cache unless the caller explicitly asks to keep them in L1.
 *
 * @param cacheKey key identifying the block
 * @param buf the block payload to cache
 * @param inMemory whether the block should be treated as in-memory priority
 * @param cacheDataInL1 if true, DATA blocks are pinned to the L1 (LRU) cache
 */
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
    final boolean cacheDataInL1) {
  boolean isDataBlock = buf.getBlockType().getCategory() == BlockCategory.DATA;
  if (isDataBlock && !cacheDataInL1) {
    l2Cache.cacheBlock(cacheKey, buf, inMemory, false);
  } else {
    lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
  }
}
示例6: getBlockMetricIndex
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
 * Maps a (category, isCompaction, metricType) triple to a flat array index
 * using a mixed-radix encoding, so each combination gets a distinct slot.
 *
 * @param blockCategory the block category dimension
 * @param isCompaction whether the read happened during a compaction
 * @param metricType the metric type dimension
 * @return a unique index for this combination
 */
private static final int getBlockMetricIndex(BlockCategory blockCategory,
    boolean isCompaction, BlockMetricType metricType) {
  int compactionBit = isCompaction ? 1 : 0;
  return (blockCategory.ordinal() * BOOL_VALUES.length + compactionBit)
      * NUM_METRIC_TYPES + metricType.ordinal();
}
示例7: getBlockMetricName
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
 * Looks up the pre-computed metric name for the given combination of block
 * category, compaction flag, and metric type.
 *
 * @param blockCategory the block category dimension
 * @param isCompaction whether the metric applies to compaction reads
 * @param metricType the metric type dimension
 * @return the metric name string for this combination
 * @throws IllegalArgumentException if isCompaction is set for a metric type
 *         that is not compaction-aware
 */
public String getBlockMetricName(BlockCategory blockCategory,
    boolean isCompaction, BlockMetricType metricType) {
  if (isCompaction && !metricType.compactionAware) {
    throw new IllegalArgumentException("isCompaction cannot be true for "
        + metricType);
  }
  int idx = getBlockMetricIndex(blockCategory, isCompaction, metricType);
  return blockMetricNames[idx];
}
示例8: incrNumericMetric
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
* Increments the given metric, both per-CF and aggregate, for both the given
* category and all categories in aggregate (four counters total).
*/
/**
 * Increments the given metric, both per-CF and aggregate, for both the given
 * category and all categories in aggregate (four counters total).
 *
 * @param blockCategory category to update; null is recorded as UNKNOWN
 * @param isCompaction whether the update comes from a compaction read
 * @param metricType which metric counter to bump
 * @param amount delta to add
 */
private void incrNumericMetric(BlockCategory blockCategory,
    boolean isCompaction, BlockMetricType metricType, long amount) {
  // Map null to UNKNOWN so the occurrence is still visible in stats.
  BlockCategory category =
      blockCategory == null ? BlockCategory.UNKNOWN : blockCategory;
  String metricName = getBlockMetricName(category, isCompaction, metricType);
  RegionMetricsStorage.incrNumericMetric(metricName, amount);
  // Roll the same delta into the all-categories aggregate exactly once.
  if (category != BlockCategory.ALL_CATEGORIES) {
    incrNumericMetric(BlockCategory.ALL_CATEGORIES, isCompaction, metricType,
        amount);
  }
}
示例9: addToReadTime
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
 * Records block read time for the given category and folds the same time
 * into the aggregate across all block categories.
 *
 * @param blockCategory the category whose read-time metric to update
 * @param isCompaction whether the read happened during a compaction
 * @param timeMs elapsed read time in milliseconds
 */
private void addToReadTime(BlockCategory blockCategory,
    boolean isCompaction, long timeMs) {
  String metricName =
      getBlockMetricName(blockCategory, isCompaction, BlockMetricType.READ_TIME);
  RegionMetricsStorage.incrTimeVaryingMetric(metricName, timeMs);
  // Also update the read time aggregated across all block categories.
  if (blockCategory != BlockCategory.ALL_CATEGORIES) {
    addToReadTime(BlockCategory.ALL_CATEGORIES, isCompaction, timeMs);
  }
}
示例10: updateOnCacheHit
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
* Updates the number of hits and the total number of block reads on a block
* cache hit.
*/
/**
 * Updates the number of hits and the total number of block reads on a block
 * cache hit. Hits are buffered in an in-memory counter and flushed to the
 * persistent metrics once a threshold is exceeded.
 *
 * @param blockCategory specific category of the block that hit the cache
 * @param isCompaction whether the read happened during a compaction
 * @param count number of hits to record
 */
public void updateOnCacheHit(BlockCategory blockCategory,
    boolean isCompaction, long count) {
  blockCategory.expectSpecific();
  int idx = getCacheHitMetricIndex(blockCategory, isCompaction);
  long accumulated = this.onHitCacheMetrics.addAndGet(idx, count);
  // Flush once the buffered count grows past the threshold.
  if (accumulated > THRESHOLD_METRICS_FLUSH) {
    flushCertainOnCacheHitMetrics(blockCategory, isCompaction);
  }
  // Mirror the update into the table-wide aggregate metrics object.
  if (this != ALL_SCHEMA_METRICS) {
    ALL_SCHEMA_METRICS.updateOnCacheHit(blockCategory, isCompaction, count);
  }
}
示例11: flushCertainOnCacheHitMetrics
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
 * Drains the buffered cache-hit count for one (category, compaction) bucket
 * into the persistent CACHE_HIT and READ_COUNT metrics.
 *
 * @param blockCategory the category bucket to flush
 * @param isCompaction the compaction flag of the bucket to flush
 */
private void flushCertainOnCacheHitMetrics(BlockCategory blockCategory, boolean isCompaction) {
  int idx = getCacheHitMetricIndex(blockCategory, isCompaction);
  // Atomically take whatever has accumulated and reset the bucket.
  long pending = this.onHitCacheMetrics.getAndSet(idx, 0);
  if (pending <= 0) {
    return;
  }
  incrNumericMetric(blockCategory, isCompaction, BlockMetricType.CACHE_HIT, pending);
  incrNumericMetric(blockCategory, isCompaction, BlockMetricType.READ_COUNT, pending);
}
示例12: flushOnCacheHitMetrics
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
* Flush the on cache hit metrics;
*/
/**
 * Flushes every buffered on-cache-hit counter — all (category, compaction)
 * buckets — into the persistent metrics, then does the same for the
 * table-wide aggregate metrics object.
 */
private void flushOnCacheHitMetrics() {
  for (BlockCategory category : BlockCategory.values()) {
    for (boolean compaction : BOOL_VALUES) {
      flushCertainOnCacheHitMetrics(category, compaction);
    }
  }
  if (this != ALL_SCHEMA_METRICS) {
    ALL_SCHEMA_METRICS.flushOnCacheHitMetrics();
  }
}
示例13: updateOnCacheMiss
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
* Updates read time, the number of misses, and the total number of block
* reads on a block cache miss.
*/
/**
 * Updates read time, the number of misses, and the total number of block
 * reads on a block cache miss.
 *
 * @param blockCategory specific category of the block that missed the cache
 * @param isCompaction whether the read happened during a compaction
 * @param timeMs time in milliseconds spent reading the block from disk
 */
public void updateOnCacheMiss(BlockCategory blockCategory,
    boolean isCompaction, long timeMs) {
  blockCategory.expectSpecific();
  addToReadTime(blockCategory, isCompaction, timeMs);
  incrNumericMetric(blockCategory, isCompaction, BlockMetricType.CACHE_MISS);
  incrNumericMetric(blockCategory, isCompaction, BlockMetricType.READ_COUNT);
  // Mirror the update into the table-wide aggregate metrics object.
  if (this != ALL_SCHEMA_METRICS) {
    ALL_SCHEMA_METRICS.updateOnCacheMiss(blockCategory, isCompaction, timeMs);
  }
}
示例14: addToCacheSize
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
* Adds the given delta to the cache size for the given block category and
* the aggregate metric for all block categories. Updates both the per-CF
* counter and the counter for all CFs (four metrics total). The cache size
* metric is "persistent", i.e. it does not get reset when metrics are
* collected.
*/
/**
 * Adds the given delta to the cache size for the given block category and
 * the aggregate metric for all block categories. Updates both the per-CF
 * counter and the counter for all CFs (four metrics total). The cache size
 * metric is "persistent", i.e. it does not get reset when metrics are
 * collected.
 *
 * @param category block category; null is treated as ALL_CATEGORIES
 * @param cacheSizeDelta signed size change in bytes
 */
public void addToCacheSize(BlockCategory category, long cacheSizeDelta) {
  BlockCategory effective =
      category == null ? BlockCategory.ALL_CATEGORIES : category;
  String metricName =
      getBlockMetricName(effective, false, BlockMetricType.CACHE_SIZE);
  RegionMetricsStorage.incrNumericPersistentMetric(metricName, cacheSizeDelta);
  // Apply the same delta to the all-categories aggregate exactly once.
  if (effective != BlockCategory.ALL_CATEGORIES) {
    addToCacheSize(BlockCategory.ALL_CATEGORIES, cacheSizeDelta);
  }
}
示例15: updateOnCachePutOrEvict
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; //导入依赖的package包/类
/**
 * Records a cache insertion or eviction: adjusts the cache size metric by
 * the given delta and bumps either the CACHED or EVICTED counter, then
 * mirrors the update into the table-wide aggregate metrics object.
 *
 * @param blockCategory category of the block being put or evicted
 * @param cacheSizeDelta signed size change in bytes (negative on eviction)
 * @param isEviction true for an eviction, false for an insertion
 */
public void updateOnCachePutOrEvict(BlockCategory blockCategory,
    long cacheSizeDelta, boolean isEviction) {
  addToCacheSize(blockCategory, cacheSizeDelta);
  BlockMetricType eventType =
      isEviction ? BlockMetricType.EVICTED : BlockMetricType.CACHED;
  incrNumericMetric(blockCategory, false, eventType);
  if (this != ALL_SCHEMA_METRICS) {
    ALL_SCHEMA_METRICS.updateOnCachePutOrEvict(blockCategory, cacheSizeDelta,
        isEviction);
  }
}