This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HColumnDescriptor.setBlockCacheEnabled. If you have been wondering what HColumnDescriptor.setBlockCacheEnabled does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.HColumnDescriptor.
Three code examples of HColumnDescriptor.setBlockCacheEnabled are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
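Before the project examples, here is a minimal, self-contained sketch of what the method does: it toggles whether HFile data blocks of a column family are kept in the RegionServer block cache when they are read. The table and family names ("demo", "cf") are placeholders chosen for this sketch, not taken from the examples below.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class BlockCacheToggleSketch {
  public static void main(String[] args) {
    // Hypothetical table/family names used only for this sketch.
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("demo"));
    HColumnDescriptor family = new HColumnDescriptor("cf");
    // Data blocks read for this family will no longer be added to the block cache.
    family.setBlockCacheEnabled(false);
    table.addFamily(family);
    // The flag is stored in the descriptor and can be read back.
    System.out.println("blockCacheEnabled = " + family.isBlockCacheEnabled()); // prints false
  }
}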
Example 1: postStartMaster
import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
/********************************* Master related hooks **********************************/
@Override
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
// Need to create the new system table for labels here
MasterServices master = ctx.getEnvironment().getMasterServices();
if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) {
HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
labelsColumn.setBloomFilterType(BloomType.NONE);
labelsColumn.setBlockCacheEnabled(false); // We will cache all the labels ourselves; there is
// no need for the normal table block cache.
labelsTable.addFamily(labelsColumn);
// Let the "labels" table having only one region always. We are not expecting too many labels in
// the system.
labelsTable.setValue(HTableDescriptor.SPLIT_POLICY,
DisabledRegionSplitPolicy.class.getName());
labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING),
Bytes.toBytes(true));
master.createTable(labelsTable, null, HConstants.NO_NONCE, HConstants.NO_NONCE);
}
}
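Design note, following the inline comments: this master hook creates the labels system table with bloom filters off and the block cache disabled because the coprocessor caches all labels itself, so letting the normal block cache hold the same data again would only waste cache space. The DisabledRegionSplitPolicy keeps the table in a single region, since only a small number of labels is expected.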
Example 2: testCacheBlocks
import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
@Test
public void testCacheBlocks() throws IOException {
// Set index block size to be the same as normal block size.
TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, BLOCK_SIZE);
HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(CF)).setMaxVersions(MAX_VERSIONS).
setCompressionType(COMPRESSION_ALGORITHM).
setBloomFilterType(BLOOM_TYPE);
hcd.setBlocksize(BLOCK_SIZE);
hcd.setBlockCacheEnabled(cfCacheEnabled);
Region region = TEST_UTIL.createTestRegion(TABLE, hcd);
BlockCache cache = region.getStore(hcd.getName()).getCacheConfig().getBlockCache();
CacheStats stats = cache.getStats();
writeTestData(region);
assertEquals(0, stats.getHitCount());
assertEquals(0, HFile.dataBlockReadCnt.get());
// Do a single get and record the miss count. If we are NOT caching DATA blocks, the miss
// count should go up on the next read. Otherwise, all blocks should be cached and the miss
// count should not rise.
region.get(new Get(Bytes.toBytes("row" + 0)));
assertTrue(stats.getHitCount() > 0);
assertTrue(HFile.dataBlockReadCnt.get() > 0);
long missCount = stats.getMissCount();
region.get(new Get(Bytes.toBytes("row" + 0)));
if (this.cfCacheEnabled) assertEquals(missCount, stats.getMissCount());
else assertTrue(stats.getMissCount() > missCount);
}
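In short, the test writes data, performs one get to warm the caches, records the miss count, and then repeats the same get: with setBlockCacheEnabled(true) on the family, the repeated read is served from the block cache and the miss count stays unchanged, while with the block cache disabled the data blocks must be read again and the miss count grows.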
Example 3: setInfoFamilyCachingForMeta
import org.apache.hadoop.hbase.HColumnDescriptor; // import the package/class this method depends on
/**
 * Enable or disable block cache and in-memory caching for the info (catalog) family of hbase:meta.
 */
public static void setInfoFamilyCachingForMeta(final HTableDescriptor metaDescriptor,
final boolean b) {
for (HColumnDescriptor hcd: metaDescriptor.getColumnFamilies()) {
if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
hcd.setBlockCacheEnabled(b);
hcd.setInMemory(b);
}
}
}
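A plausible way to call this helper is sketched below, under the assumption that an Admin handle is available; the Admin usage and the surrounding maintenance workflow are illustrative assumptions, not part of the example above. Caching for the info family of hbase:meta is switched off around some work and restored afterwards.

// Sketch only: fetch the current hbase:meta descriptor through an assumed Admin handle.
HTableDescriptor metaDescriptor = admin.getTableDescriptor(TableName.META_TABLE_NAME);
// Turn off block cache and in-memory caching of the info family while the work runs ...
setInfoFamilyCachingForMeta(metaDescriptor, false);
// ... perform the maintenance on hbase:meta ...
// ... then restore caching for normal operation.
setInfoFamilyCachingForMeta(metaDescriptor, true);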