This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.BlockCache. If you are wondering what exactly BlockCache does, how to use it, or where to find working examples, the curated snippets below should help.
The BlockCache class belongs to the org.apache.hadoop.hbase.io.hfile package. 14 code examples are shown below, ordered by popularity.
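Most of the examples share one pattern: obtain the process-wide BlockCache from a CacheConfig built on the cluster Configuration, inspect it, and shut it down when its owner is done with it. Here is a minimal sketch of that pattern; the factory methods vary across HBase versions, so treat this as illustrative rather than canonical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class BlockCacheSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // In the HBase versions these examples target, CacheConfig hands out the block cache.
    BlockCache bc = new CacheConfig(conf).getBlockCache();
    if (bc != null) {
      System.out.println("blocks currently cached: " + bc.getBlockCount());
      bc.shutdown(); // releases off-heap or file-backed resources, if any
    }
  }
}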
Example 1: SortedOplogConfiguration
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
public SortedOplogConfiguration(String name, BlockCache blockCache, SortedOplogStatistics stats, HFileStoreStatistics storeStats) {
  this.name = name;
  this.stats = stats;
  // defaults
  bloom = true;
  blockSize = 1 << 16;
  bytesPerChecksum = 1 << 14;
  checksum = Checksum.NONE;
  compression = Compression.NONE;
  keyEncoding = KeyEncoding.NONE;
  comparator = new ByteComparator();
  this.cacheDataBlocksOnRead = true;
  this.storeStats = storeStats;
  this.blockCache = blockCache;
}
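For reference, the defaults amount to a 64 KB block size (1 << 16) and a 16 KB checksum chunk (1 << 14), mirroring HBase's usual HFile settings.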
Example 2: testClearBlockCache
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
@Test
public void testClearBlockCache() throws Exception {
  BlockCache blockCache1 = rs1.getCacheConfig().getBlockCache();
  BlockCache blockCache2 = rs2.getCacheConfig().getBlockCache();
  long initialBlockCount1 = blockCache1.getBlockCount();
  long initialBlockCount2 = blockCache2.getBlockCount();
  // a scan causes blocks to be added to the BlockCache
  scanAllRegionsForRS(rs1);
  assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
  clearRegionBlockCache(rs1);
  scanAllRegionsForRS(rs2);
  assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
    HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
  clearRegionBlockCache(rs2);
  assertEquals(initialBlockCount1, blockCache1.getBlockCount());
  assertEquals(initialBlockCount2, blockCache2.getBlockCount());
}
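The assertions rely on the fixture writing little enough data that a full scan leaves exactly one cached block per HFile; scanAllRegionsForRS and clearRegionBlockCache are helpers defined elsewhere in the test class.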
Example 3: testClearBlockCacheFromAdmin
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
@Test
public void testClearBlockCacheFromAdmin() throws Exception {
  Admin admin = HTU.getAdmin();
  // all RSes run in the same process, so rs1 and rs2 share one block cache
  BlockCache blockCache = rs1.getCacheConfig().getBlockCache();
  long initialBlockCount = blockCache.getBlockCount();
  // a scan causes blocks to be added to the BlockCache
  scanAllRegionsForRS(rs1);
  assertEquals(blockCache.getBlockCount() - initialBlockCount,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
  scanAllRegionsForRS(rs2);
  assertEquals(blockCache.getBlockCount() - initialBlockCount,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
      + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
  CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME);
  assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
    + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
  assertEquals(initialBlockCount, blockCache.getBlockCount());
}
Example 4: testClearBlockCacheFromAsyncAdmin
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
@Test
public void testClearBlockCacheFromAsyncAdmin() throws Exception {
  AsyncAdmin admin =
    ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get().getAdmin();
  // all RSes run in the same process, so rs1 and rs2 share one block cache
  BlockCache blockCache = rs1.getCacheConfig().getBlockCache();
  long initialBlockCount = blockCache.getBlockCount();
  // a scan causes blocks to be added to the BlockCache
  scanAllRegionsForRS(rs1);
  assertEquals(blockCache.getBlockCount() - initialBlockCount,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
  scanAllRegionsForRS(rs2);
  assertEquals(blockCache.getBlockCount() - initialBlockCount,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
      + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
  CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get();
  assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY)
    + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
  assertEquals(initialBlockCount, blockCache.getBlockCount());
}
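Unlike the synchronous variant in Example 3, clearBlockCache here returns a future, hence the trailing get(). Note also that the AsyncConnection is never closed, which is acceptable only in throwaway test code; production code should manage it with try-with-resources.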
Example 5: testCreateCache
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
/**
 * Test cache creation.
 */
public void testCreateCache()
{
  LOG.info("Test create cache started");
  try {
    Class<?> cls = Class.forName("com.koda.integ.hbase.blockcache.OffHeapBlockCache");
    Constructor<?> ctr = cls.getDeclaredConstructor(Configuration.class);
    cache = (BlockCache) ctr.newInstance(conf);
    assertTrue(true);
    LOG.info("Test create cache finished.");
  } catch (Exception e) {
    LOG.error("Could not instantiate 'com.koda.integ.hbase.blockcache.OffHeapBlockCache' class, will resort to standard cache impl.");
    assertTrue(false);
  }
}
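The cache implementation is loaded reflectively so the test can log a clear message and fail cleanly when the third-party OffHeapBlockCache class (from the Koda HBase integration) is not on the classpath.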
Example 6: create
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher,
    Server server, RegionServerAccounting regionServerAccounting) {
  BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
  if (blockCache instanceof ResizableBlockCache) {
    return new HeapMemoryManager((ResizableBlockCache) blockCache, memStoreFlusher, server,
      regionServerAccounting);
  }
  return null;
}
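Note that create returns null when the configured cache does not implement ResizableBlockCache, so the region server must be prepared to run without a HeapMemoryManager.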
Example 7: main
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
/**
 * Facility for dumping and compacting catalog tables. Only does catalog tables since these are
 * the only tables whose schema we know for sure. For usage run:
 * <pre>
 * ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 *
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "wal" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();
  final Configuration walConf = new Configuration(c);
  FSUtils.setRootDir(walConf, logdir);
  final WALFactory wals = new WALFactory(walConf, null, logname);
  try {
    processTable(fs, tableDir, wals, c, majorCompact);
  } finally {
    wals.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
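Because this runs as a standalone process, the finally block shuts down whatever BlockCache the CacheConfig instantiated; the WAL name embeds a timestamp so repeated runs under hbase.tmp.dir do not collide.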
Example 8: teardown
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
@After
public void teardown() throws IOException {
  if (region != null) {
    BlockCache bc = region.getStores().get(0).getCacheConfig().getBlockCache();
    ((HRegion) region).close();
    WAL wal = ((HRegion) region).getWAL();
    if (wal != null) wal.close();
    if (bc != null) bc.shutdown();
    region = null;
  }
}
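Note the ordering: the cache handle is captured before the region is closed, and the cache is shut down last, after the region and its WAL.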
Example 9: HFileSortedOplog
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
public HFileSortedOplog(HDFSStoreImpl store, Path hfilePath,
    BlockCache blockCache, SortedOplogStatistics stats,
    HFileStoreStatistics storeStats) throws IOException {
  super(store, hfilePath, stats);
  cacheConf = getCacheConfInstance(blockCache, stats, storeStats);
  reader = getReaderContainer();
}
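This constructor appears to come from the same sorted-oplog codebase as Example 1, which layers HDFS persistence on HBase's HFile format; getCacheConfInstance and getReaderContainer are helpers of the enclosing class.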
Example 10: main
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
/**
 * Facility for dumping and compacting catalog tables. Only does catalog tables since these are
 * the only tables whose schema we know for sure. For usage run:
 *
 * <pre>
 * ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args args[0] is the table directory; an optional args[1] of "major" requests a major compaction
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir =
    new Path(c.get("hbase.tmp.dir"), "hlog" + tableDir.getName()
      + EnvironmentEdgeManager.currentTimeMillis());
  final Path oldLogDir = new Path(c.get("hbase.tmp.dir"), HConstants.HREGION_OLDLOGDIR_NAME);
  final HLog log = new HLog(fs, logdir, oldLogDir, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
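This is the same tool as Example 7, taken from an older HBase release: the write-ahead log is still managed through the HLog class rather than WALFactory.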
Example 11: testBlockCacheSummary
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
/**
 * This test inserts data into multiple tables and then reads both tables to ensure
 * they are in the block cache.
 *
 * @throws Exception exception
 */
@Test
public void testBlockCacheSummary() throws Exception {
  HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
  addRows(ht, FAMILY);
  HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
  addRows(ht2, FAMILY);
  TEST_UTIL.flush();
  scan(ht, FAMILY);
  scan(ht2, FAMILY);
  BlockCache bc =
    new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
  List<BlockCacheColumnFamilySummary> bcs =
    bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
  LOG.info("blockCacheSummary: " + bcs);
  assertEquals("blockCache summary has entries", 3, bcs.size());
  BlockCacheColumnFamilySummary e = bcs.get(0);
  assertEquals("table", "-ROOT-", e.getTable());
  assertEquals("cf", "info", e.getColumnFamily());
  e = bcs.get(1);
  assertEquals("table", TEST_TABLE, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());
  e = bcs.get(2);
  assertEquals("table", TEST_TABLE2, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());
  ht.close();
  ht2.close();
}
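The test expects exactly three summary entries because, in this HBase version, blocks from the -ROOT- catalog table's info family end up in the cache alongside the two test tables.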
Example 12: create
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
public static HeapMemoryManager create(Configuration conf, FlushRequester memStoreFlusher,
    Server server) {
  BlockCache blockCache = CacheConfig.instantiateBlockCache(conf);
  if (blockCache instanceof ResizableBlockCache) {
    return new HeapMemoryManager((ResizableBlockCache) blockCache, memStoreFlusher, server);
  }
  return null;
}
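This is an older overload of Example 6's factory: it predates the RegionServerAccounting parameter but applies the same ResizableBlockCache check.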
Example 13: main
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
/**
 * Facility for dumping and compacting catalog tables.
 * Only does catalog tables since these are the only tables whose schema we
 * know for sure. For usage run:
 * <pre>
 * ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 * @param args args[0] is the table directory; an optional args[1] of "major" requests a major compaction
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "hlog" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();
  final HLog log = HLogFactory.createHLog(fs, logdir, logname, c);
  try {
    processTable(fs, tableDir, log, c, majorCompact);
  } finally {
    log.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
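Another HLog-era variant of the tool from Examples 7 and 10; here the log is obtained through HLogFactory.createHLog instead of being constructed directly.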
Example 14: testBlockCacheSummary
import org.apache.hadoop.hbase.io.hfile.BlockCache; // import the required package/class
/**
 * This test inserts data into multiple tables and then reads both tables to ensure
 * they are in the block cache.
 *
 * @throws Exception exception
 */
@Test
public void testBlockCacheSummary() throws Exception {
  HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
  addRows(ht, FAMILY);
  HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
  addRows(ht2, FAMILY);
  TEST_UTIL.flush();
  scan(ht, FAMILY);
  scan(ht2, FAMILY);
  BlockCache bc =
    new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
  List<BlockCacheColumnFamilySummary> bcs =
    bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
  LOG.info("blockCacheSummary: " + bcs);
  assertTrue("blockCache summary has " + bcs.size() + " entries", bcs.size() >= 2);
  BlockCacheColumnFamilySummary e = bcs.get(bcs.size() - 2);
  assertEquals("table", TEST_TABLE, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());
  e = bcs.get(bcs.size() - 1);
  assertEquals("table", TEST_TABLE2, e.getTable());
  assertEquals("cf", TEST_CF, e.getColumnFamily());
  ht.close();
  ht2.close();
}
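A looser variant of Example 11: rather than pinning the summary list to exactly three entries, it requires at least two and only checks that the last two belong to the test tables, presumably to stay independent of how many catalog-table entries happen to be cached.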