This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.LruBlockCache. If you are unsure what the LruBlockCache class does, how to use it, or what real code that uses it looks like, the curated examples below should help.
The LruBlockCache class belongs to the org.apache.hadoop.hbase.io.hfile package. Eleven code examples of the class are shown below, sorted by popularity by default.
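Most of the examples share one pattern: obtain the block cache that HBase builds from the active Configuration through CacheConfig, cast it to LruBlockCache (the default on-heap implementation), and clear it before making assertions. A minimal sketch of that pattern, assuming the testUtil / TEST_UTIL fields seen in the tests are an HBaseTestingUtility (that helper class is an assumption, not something shown in the examples):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

public class LruBlockCacheSketch {
  // Assumed stand-in for the testUtil / TEST_UTIL fields used in the examples below.
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  // Fetch the configuration-backed block cache, cast it to the LRU
  // implementation, and reset it so a test starts from an empty cache.
  static LruBlockCache freshCache() {
    CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
    LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
    cache.clearCache();
    return cache;
  }
}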
Example 1: testEncodedSeeker
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : "
      + includeTags + ", compressTags : " + compressTags);
  if (includeTags) {
    testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
  }
  LruBlockCache cache =
      (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  // Need to disable default row bloom filter for this test to pass.
  HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
      setDataBlockEncoding(encoding).
      setBlocksize(BLOCK_SIZE).
      setBloomFilterType(BloomType.NONE).
      setCompressTags(compressTags);
  Region region = testUtil.createTestRegion(TABLE_NAME, hcd);
  // write the data, but leave some in the memstore
  doPuts(region);
  // verify correctness when memstore contains data
  doGets(region);
  // verify correctness again after compacting
  region.compact(false);
  doGets(region);
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example 2: testEncodedSeeker
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : "
      + includeTags + ", compressTags : " + compressTags);
  if (includeTags) {
    testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
  }
  LruBlockCache cache =
      (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  // Need to disable default row bloom filter for this test to pass.
  HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
      setDataBlockEncoding(encoding).
      setBlocksize(BLOCK_SIZE).
      setBloomFilterType(BloomType.NONE).
      setCompressTags(compressTags);
  HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);
  // write the data, but leave some in the memstore
  doPuts(region);
  // verify correctness when memstore contains data
  doGets(region);
  // verify correctness again after compacting
  region.compactStores();
  doGets(region);
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example 3: testEncodedSeeker
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : "
      + includeTags + ", compressTags : " + compressTags);
  if (includeTags) {
    testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
  }
  LruBlockCache cache =
      (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  // Need to disable default row bloom filter for this test to pass.
  HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
      setDataBlockEncoding(encoding).
      setBlocksize(BLOCK_SIZE).
      setBloomFilterType(BloomType.NONE).
      setCompressTags(compressTags);
  HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);
  // write the data, but leave some in the memstore
  doPuts(region);
  // verify correctness when memstore contains data
  doGets(region);
  // verify correctness again after compacting
  region.compact(false);
  doGets(region);
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example 4: testEncodedSeeker
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding " + encoding);
  LruBlockCache cache =
      (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  // Need to disable default row bloom filter for this test to pass.
  HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
      setDataBlockEncoding(encoding).
      setBlocksize(BLOCK_SIZE).
      setBloomFilterType(BloomType.NONE);
  HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);
  // write the data, but leave some in the memstore
  doPuts(region);
  // verify correctness when memstore contains data
  doGets(region);
  // verify correctness again after compacting
  region.compactStores();
  doGets(region);
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example 5: testEncodedSeeker
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding " + encoding);
  LruBlockCache cache = (LruBlockCache)
      new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  HRegion region = testUtil.createTestRegion(
      TABLE_NAME, new HColumnDescriptor(CF_NAME)
          .setMaxVersions(MAX_VERSIONS)
          .setDataBlockEncoding(encoding)
          .setEncodeOnDisk(encodeOnDisk)
          .setBlocksize(BLOCK_SIZE)
  );
  // write the data, but leave some in the memstore
  doPuts(region);
  // verify correctness when memstore contains data
  doGets(region);
  // verify correctness again after compacting
  region.compactStores();
  doGets(region);
  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();
  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example 6: clearBlockCache
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
private void clearBlockCache() {
  ((LruBlockCache) cacheConf.getBlockCache()).clearCache();
}
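A helper like this is usually called between a test's write phase and its read phase so that later reads have to repopulate the cache from HFiles. A small hedged sketch of such a call site, assuming a cacheConf field as in the example above and assuming the cache exposes getBlockCount() for the sanity check:

import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

class BlockCacheTestSupport {
  private final CacheConfig cacheConf; // assumed field, as in the example above

  BlockCacheTestSupport(CacheConfig cacheConf) {
    this.cacheConf = cacheConf;
  }

  // Clear the cache between the write phase and the read phase so reads are
  // served from HFiles, then sanity-check that the cache really is empty.
  void clearBlockCache() {
    LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
    cache.clearCache();
    assert cache.getBlockCount() == 0 : "cache should be empty after clearCache()";
  }
}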
Example 7: testWholesomeSplit
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
@Test public void testWholesomeSplit() throws IOException {
  final int rowcount = TEST_UTIL.loadRegion(this.parent, CF, true);
  assertTrue(rowcount > 0);
  int parentRowCount = countRows(this.parent);
  assertEquals(rowcount, parentRowCount);
  // Pretend region's blocks are not in the cache, used for
  // testWholesomeSplitWithHFileV1
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  ((LruBlockCache) cacheConf.getBlockCache()).clearCache();
  // Start transaction.
  SplitTransactionImpl st = prepareGOOD_SPLIT_ROW();
  // Run the execute. Look at what it returns.
  Server mockServer = Mockito.mock(Server.class);
  when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
  PairOfSameType<Region> daughters = st.execute(mockServer, null);
  // Do some assertions about execution.
  assertTrue(this.fs.exists(this.parent.getRegionFileSystem().getSplitsDir()));
  // Assert the parent region is closed.
  assertTrue(this.parent.isClosed());
  // Assert splitdir is empty -- because its content will have been moved out
  // to be under the daughter region dirs.
  assertEquals(0, this.fs.listStatus(this.parent.getRegionFileSystem().getSplitsDir()).length);
  // Check daughters have correct key span.
  assertTrue(Bytes.equals(parent.getRegionInfo().getStartKey(),
      daughters.getFirst().getRegionInfo().getStartKey()));
  assertTrue(Bytes.equals(GOOD_SPLIT_ROW, daughters.getFirst().getRegionInfo().getEndKey()));
  assertTrue(Bytes.equals(daughters.getSecond().getRegionInfo().getStartKey(), GOOD_SPLIT_ROW));
  assertTrue(Bytes.equals(parent.getRegionInfo().getEndKey(),
      daughters.getSecond().getRegionInfo().getEndKey()));
  // Count rows. daughters are already open
  int daughtersRowCount = 0;
  for (Region openRegion : daughters) {
    try {
      int count = countRows(openRegion);
      assertTrue(count > 0 && count != rowcount);
      daughtersRowCount += count;
    } finally {
      HRegion.closeHRegion((HRegion) openRegion);
    }
  }
  assertEquals(rowcount, daughtersRowCount);
  // Assert the write lock is no longer held on parent
  assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread());
}
Example 8: HDFSStoreImpl
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
public HDFSStoreImpl(String name, final HDFSStore config) {
  this.configHolder = new HDFSStoreConfigHolder(config);
  configHolder.setName(name);
  logger = ComponentLogWriter.getHoplogLogWriter("HdfsStore:" + name, LogService.logger());
  stats = new HFileStoreStatistics(InternalDistributedSystem.getAnyInstance(),
      "HDFSStoreStatistics", name);
  final Configuration hconf;
  if (config instanceof HDFSStoreFactoryImpl
      && null != ((HDFSStoreFactoryImpl) config).getConfiguration()) {
    // this path is executed for loner
    hconf = ((HDFSStoreFactoryImpl) config).getConfiguration();
  } else {
    hconf = new Configuration();
  }
  // Set the block cache size.
  // Disable the static block cache. We keep our own cache on the HDFS Store
  // hconf.setFloat("hfile.block.cache.size", 0f);
  if (this.getBlockCacheSize() != 0) {
    long cacheSize = (long) (HeapMemoryMonitor.getTenuredPoolMaxMemory()
        * this.getBlockCacheSize() / 100);
    // TODO use an off heap block cache if we're using off heap memory?
    // See CacheConfig.instantiateBlockCache.
    // According to Anthony, the off heap block cache is still
    // experimental. Our own off heap cache might be a better bet.
    // this.blockCache = new LruBlockCache(cacheSize,
    //     StoreFile.DEFAULT_BLOCKSIZE_SMALL, hconf, HFileSortedOplogFactory.convertStatistics(stats));
    this.blockCache = new LruBlockCache(cacheSize,
        StoreFile.DEFAULT_BLOCKSIZE_SMALL, hconf);
  } else {
    this.blockCache = null;
  }
  final String clientFile = config.getHDFSClientConfigFile();
  fs = new SingletonValue<FileSystem>(new SingletonBuilder<FileSystem>() {
    @Override
    public FileSystem create() throws IOException {
      return createFileSystem(hconf, clientFile, false);
    }

    @Override
    public void postCreate() {
    }

    @Override
    public void createInProgress() {
    }
  });
  FileSystem fileSystem = null;
  try {
    fileSystem = fs.get();
  } catch (Throwable ex) {
    throw new HDFSIOException(ex.getMessage(), ex);
  }
  // HDFSCompactionConfig has already been initialized
  long cleanUpIntervalMillis = getHDFSCompactionConfig().getOldFilesCleanupIntervalMins() * 60 * 1000;
  Path cleanUpIntervalPath = new Path(getHomeDir(), HoplogConfig.CLEAN_UP_INTERVAL_FILE_NAME);
  HoplogUtil.exposeCleanupIntervalMillis(fileSystem, cleanUpIntervalPath, cleanUpIntervalMillis);
}
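Example 8 is the only snippet that constructs an LruBlockCache directly rather than retrieving it through CacheConfig: it sizes the cache as a percentage of the tenured heap and keeps it private to the HDFS store, bypassing the region server's shared cache. A minimal sketch of that construction, where the 1 GB heap figure and the 10% setting are illustrative assumptions rather than values from the source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.regionserver.StoreFile;

public class DirectLruBlockCacheSketch {
  public static void main(String[] args) {
    // Illustrative sizing: the real code derives these values from
    // HeapMemoryMonitor.getTenuredPoolMaxMemory() and getBlockCacheSize().
    long tenuredHeapBytes = 1024L * 1024 * 1024; // assumed 1 GB tenured pool
    int blockCachePercent = 10;                  // assumed cache percentage
    long cacheSize = tenuredHeapBytes * blockCachePercent / 100;

    // Build a store-private cache with the same constructor arguments as
    // Example 8, instead of the shared CacheConfig-managed cache.
    Configuration hconf = new Configuration();
    LruBlockCache blockCache =
        new LruBlockCache(cacheSize, StoreFile.DEFAULT_BLOCKSIZE_SMALL, hconf);
    System.out.println("max cache size = " + blockCache.size() + " bytes");
    blockCache.shutdown(); // release the cache's background threads when done
  }
}

Keeping the cache store-private like this decouples its sizing from the hfile.block.cache.size setting that the shared, statically configured block cache uses, which is exactly the trade-off the comments in Example 8 describe.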
Example 9: testWholesomeSplit
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
@Test public void testWholesomeSplit() throws IOException {
  final int rowcount = TEST_UTIL.loadRegion(this.parent, CF, true);
  assertTrue(rowcount > 0);
  int parentRowCount = countRows(this.parent);
  assertEquals(rowcount, parentRowCount);
  // Pretend region's blocks are not in the cache, used for
  // testWholesomeSplitWithHFileV1
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  ((LruBlockCache) cacheConf.getBlockCache()).clearCache();
  // Start transaction.
  SplitTransaction st = prepareGOOD_SPLIT_ROW();
  // Run the execute. Look at what it returns.
  Server mockServer = Mockito.mock(Server.class);
  when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
  PairOfSameType<HRegion> daughters = st.execute(mockServer, null);
  // Do some assertions about execution.
  assertTrue(this.fs.exists(st.getSplitDir()));
  // Assert the parent region is closed.
  assertTrue(this.parent.isClosed());
  // Assert splitdir is empty -- because its content will have been moved out
  // to be under the daughter region dirs.
  assertEquals(0, this.fs.listStatus(st.getSplitDir()).length);
  // Check daughters have correct key span.
  assertTrue(Bytes.equals(this.parent.getStartKey(),
      daughters.getFirst().getStartKey()));
  assertTrue(Bytes.equals(GOOD_SPLIT_ROW,
      daughters.getFirst().getEndKey()));
  assertTrue(Bytes.equals(daughters.getSecond().getStartKey(),
      GOOD_SPLIT_ROW));
  assertTrue(Bytes.equals(this.parent.getEndKey(),
      daughters.getSecond().getEndKey()));
  // Count rows.
  int daughtersRowCount = 0;
  for (HRegion r : daughters) {
    // Open so can count its content.
    HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
        r.getTableDesc(), r.getLog(), TEST_UTIL.getConfiguration());
    try {
      int count = countRows(openRegion);
      assertTrue(count > 0 && count != rowcount);
      daughtersRowCount += count;
    } finally {
      openRegion.close();
      openRegion.getLog().closeAndDelete();
    }
  }
  assertEquals(rowcount, daughtersRowCount);
  // Assert the write lock is no longer held on parent
  assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread());
}
Example 10: testWholesomeSplit
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
@Test public void testWholesomeSplit() throws IOException {
  final int rowcount = TEST_UTIL.loadRegion(this.parent, CF, true);
  assertTrue(rowcount > 0);
  int parentRowCount = countRows(this.parent);
  assertEquals(rowcount, parentRowCount);
  // Pretend region's blocks are not in the cache, used for
  // testWholesomeSplitWithHFileV1
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  ((LruBlockCache) cacheConf.getBlockCache()).clearCache();
  // Start transaction.
  SplitTransaction st = prepareGOOD_SPLIT_ROW();
  // Run the execute. Look at what it returns.
  Server mockServer = Mockito.mock(Server.class);
  when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
  PairOfSameType<HRegion> daughters = st.execute(mockServer, null);
  // Do some assertions about execution.
  assertTrue(this.fs.exists(this.parent.getRegionFileSystem().getSplitsDir()));
  // Assert the parent region is closed.
  assertTrue(this.parent.isClosed());
  // Assert splitdir is empty -- because its content will have been moved out
  // to be under the daughter region dirs.
  assertEquals(0, this.fs.listStatus(this.parent.getRegionFileSystem().getSplitsDir()).length);
  // Check daughters have correct key span.
  assertTrue(Bytes.equals(this.parent.getStartKey(), daughters.getFirst().getStartKey()));
  assertTrue(Bytes.equals(GOOD_SPLIT_ROW, daughters.getFirst().getEndKey()));
  assertTrue(Bytes.equals(daughters.getSecond().getStartKey(), GOOD_SPLIT_ROW));
  assertTrue(Bytes.equals(this.parent.getEndKey(), daughters.getSecond().getEndKey()));
  // Count rows. daughters are already open
  int daughtersRowCount = 0;
  for (HRegion openRegion : daughters) {
    try {
      int count = countRows(openRegion);
      assertTrue(count > 0 && count != rowcount);
      daughtersRowCount += count;
    } finally {
      HRegion.closeHRegion(openRegion);
    }
  }
  assertEquals(rowcount, daughtersRowCount);
  // Assert the write lock is no longer held on parent
  assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread());
}
Example 11: testWholesomeSplit
import org.apache.hadoop.hbase.io.hfile.LruBlockCache; // import the dependent package/class
@Test public void testWholesomeSplit() throws IOException {
  final int rowcount = TEST_UTIL.loadRegion(this.parent, CF, true);
  assertTrue(rowcount > 0);
  int parentRowCount = countRows(this.parent);
  assertEquals(rowcount, parentRowCount);
  // Pretend region's blocks are not in the cache, used for
  // testWholesomeSplitWithHFileV1
  CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
  ((LruBlockCache) cacheConf.getBlockCache()).clearCache();
  // Start transaction.
  SplitTransaction st = prepareGOOD_SPLIT_ROW();
  // Run the execute. Look at what it returns.
  Server mockServer = Mockito.mock(Server.class);
  when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
  PairOfSameType<HRegion> daughters = st.execute(mockServer, null);
  // Do some assertions about execution.
  assertTrue(this.fs.exists(st.getSplitDir()));
  // Assert the parent region is closed.
  assertTrue(this.parent.isClosed());
  // Assert splitdir is empty -- because its content will have been moved out
  // to be under the daughter region dirs.
  assertEquals(0, this.fs.listStatus(st.getSplitDir()).length);
  // Check daughters have correct key span.
  assertTrue(Bytes.equals(this.parent.getStartKey(), daughters.getFirst().getStartKey()));
  assertTrue(Bytes.equals(GOOD_SPLIT_ROW, daughters.getFirst().getEndKey()));
  assertTrue(Bytes.equals(daughters.getSecond().getStartKey(), GOOD_SPLIT_ROW));
  assertTrue(Bytes.equals(this.parent.getEndKey(), daughters.getSecond().getEndKey()));
  // Count rows. daughters are already open
  int daughtersRowCount = 0;
  for (HRegion openRegion : daughters) {
    try {
      int count = countRows(openRegion);
      assertTrue(count > 0 && count != rowcount);
      daughtersRowCount += count;
    } finally {
      HRegion.closeHRegion(openRegion);
    }
  }
  assertEquals(rowcount, daughtersRowCount);
  // Assert the write lock is no longer held on parent
  assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread());
}