This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.LruBlockCache.getEncodingCountsForTest. If you are wondering what LruBlockCache.getEncodingCountsForTest does, how to call it, or where to find it used in practice, the curated code samples below should help. You can also browse the enclosing class, org.apache.hadoop.hbase.io.hfile.LruBlockCache, for further usage examples.
The following shows 5 code examples of the LruBlockCache.getEncodingCountsForTest method.
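Before the full examples, here is a minimal, self-contained sketch of the call itself. getEncodingCountsForTest is a test-only accessor that reports, for each DataBlockEncoding, how many blocks currently held in the LruBlockCache use that encoding. The sketch assumes an HBaseTestingUtility-backed configuration, as the examples below do; the class name EncodingCountSketch is made up for illustration.

import java.util.Map;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

public class EncodingCountSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    // The default CacheConfig exposes the process-wide block cache; in these
    // test setups it is backed by an LruBlockCache, so the cast holds here.
    LruBlockCache cache =
        (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
    cache.clearCache();
    // One map entry per encoding seen among the blocks currently in the cache.
    Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
    System.out.println("encodingCounts=" + encodingCounts);
  }
}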
Example 1: testEncodedSeeker
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the package/class this method depends on
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : "
      + includeTags + ", compressTags : " + compressTags);
  if (includeTags) {
    testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
  }
  LruBlockCache cache =
      (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  // Need to disable default row bloom filter for this test to pass.
  HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
      setDataBlockEncoding(encoding).
      setBlocksize(BLOCK_SIZE).
      setBloomFilterType(BloomType.NONE).
      setCompressTags(compressTags);
  Region region = testUtil.createTestRegion(TABLE_NAME, hcd);
  // write the data, but leave some in the memstore
  doPuts(region);
  // verify correctness when memstore contains data
  doGets(region);
  // verify correctness again after compacting
  region.compact(false);
  doGets(region);
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example 2: testEncodedSeeker
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the package/class this method depends on
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : "
      + includeTags + ", compressTags : " + compressTags);
  if (includeTags) {
    testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
  }
  LruBlockCache cache =
      (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  // Need to disable default row bloom filter for this test to pass.
  HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
      setDataBlockEncoding(encoding).
      setBlocksize(BLOCK_SIZE).
      setBloomFilterType(BloomType.NONE).
      setCompressTags(compressTags);
  HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);
  // write the data, but leave some in the memstore
  doPuts(region);
  // verify correctness when memstore contains data
  doGets(region);
  // verify correctness again after compacting
  region.compactStores();
  doGets(region);
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example 3: testEncodedSeeker
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the package/class this method depends on
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : "
      + includeTags + ", compressTags : " + compressTags);
  if (includeTags) {
    testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
  }
  LruBlockCache cache =
      (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  // Need to disable default row bloom filter for this test to pass.
  HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
      setDataBlockEncoding(encoding).
      setBlocksize(BLOCK_SIZE).
      setBloomFilterType(BloomType.NONE).
      setCompressTags(compressTags);
  HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);
  // write the data, but leave some in the memstore
  doPuts(region);
  // verify correctness when memstore contains data
  doGets(region);
  // verify correctness again after compacting
  region.compact(false);
  doGets(region);
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example 4: testEncodedSeeker
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the package/class this method depends on
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding " + encoding);
  LruBlockCache cache =
      (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  // Need to disable default row bloom filter for this test to pass.
  HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
      setDataBlockEncoding(encoding).
      setBlocksize(BLOCK_SIZE).
      setBloomFilterType(BloomType.NONE);
  HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);
  // write the data, but leave some in the memstore
  doPuts(region);
  // verify correctness when memstore contains data
  doGets(region);
  // verify correctness again after compacting
  region.compactStores();
  doGets(region);
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example 5: testEncodedSeeker
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the package/class this method depends on
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding " + encoding);
  LruBlockCache cache =
      (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  HRegion region = testUtil.createTestRegion(
      TABLE_NAME, new HColumnDescriptor(CF_NAME)
          .setMaxVersions(MAX_VERSIONS)
          .setDataBlockEncoding(encoding)
          .setEncodeOnDisk(encodeOnDisk)
          .setBlocksize(BLOCK_SIZE)
  );
  // write the data, but leave some in the memstore
  doPuts(region);
  // verify correctness when memstore contains data
  doGets(region);
  // verify correctness again after compacting
  region.compactStores();
  doGets(region);
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
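The five examples appear to be variants of the same test captured from different HBase versions: they differ in whether the region is typed as Region or HRegion, whether compaction is triggered via region.compact(false) or the older region.compactStores(), whether tag inclusion and tag compression are exercised, and, in Example 5, in the call to setEncodeOnDisk, a setting later releases dropped. The final assertions are identical in every variant: after compaction, getEncodingCountsForTest must report blocks of exactly one encoding, the one configured on the column family, which is how the test verifies that compaction does not pollute the cache with differently encoded or unencoded blocks.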