当前位置: 首页>>代码示例>>Java>>正文


Java CacheConfig.getBlockCache方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.io.hfile.CacheConfig.getBlockCache方法的典型用法代码示例。如果您正苦于以下问题：Java CacheConfig.getBlockCache方法的具体用法？Java CacheConfig.getBlockCache怎么用？Java CacheConfig.getBlockCache使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.io.hfile.CacheConfig的用法示例。


在下文中一共展示了CacheConfig.getBlockCache方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: initBlockCache

import org.apache.hadoop.hbase.io.hfile.CacheConfig; //导入方法依赖的package包/类
/**
 * It's possible that due to threading the block cache could not be initialized
 * yet (testing multiple region servers in one jvm).  So we need to try and initialize
 * the blockCache and cacheStats reference multiple times until we succeed.
 */
/**
 * Lazily resolves the block cache and its statistics from the region server's
 * cache configuration. Due to start-up threading (e.g. multiple region servers
 * in one JVM) the cache may not exist on the first call, so callers retry this
 * until both references are populated. Synchronized so concurrent callers do
 * not race on the two lazily-set fields.
 */
private synchronized void initBlockCache() {
  final CacheConfig config = this.regionServer.cacheConfig;
  // Resolve the cache itself first; skip once already set.
  if (this.blockCache == null && config != null) {
    this.blockCache = config.getBlockCache();
  }
  // Stats can only be fetched after the cache reference is available.
  if (this.cacheStats == null && this.blockCache != null) {
    this.cacheStats = this.blockCache.getStats();
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:16,代码来源:MetricsRegionServerWrapperImpl.java

示例2: setUp

import org.apache.hadoop.hbase.io.hfile.CacheConfig; //导入方法依赖的package包/类
/**
 * Prepares the shared test fixtures: pins the newest HFile format on the test
 * configuration, obtains the filesystem, and resolves the block cache from a
 * fresh {@link CacheConfig} (the cache must exist for these tests to run).
 */
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  // Compound Bloom filters require the most recent HFile format (i.e. v2),
  // so force the maximum supported version before anything reads the config.
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  fs = FileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
  // The rest of the suite dereferences the cache, so fail fast if absent.
  assertNotNull(blockCache);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:14,代码来源:TestCompoundBloomFilter.java

示例3: readStoreFile

import org.apache.hadoop.hbase.io.hfile.CacheConfig; //导入方法依赖的package包/类
/**
 * Walks every block of the store file at {@code path} and verifies that a
 * block is present in the block cache exactly when the cache-on-write type
 * under test ({@code cowType}) says it should have been cached at write time.
 */
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache blockCache = cacheConf.getBlockCache();
  StoreFile storeFile = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL);
  HFileReaderV2 reader = (HFileReaderV2) storeFile.createReader().getHFileReader();
  try {
    // Read-side caching is disabled so the scan itself does not populate the cache.
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Iterate all blocks up to the load-on-open section (cribbed from
    // io.hfile.TestCacheOnWrite).
    HFileBlock previous = null;
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Use the previous block's hint for the on-disk size when available.
      long onDiskSize = (previous == null) ? -1 : previous.getNextBlockOnDiskSizeWithHeader();
      // Flags: don't cache the block, use pread, this is not a compaction.
      // A null expected block type skips type verification.
      HFileBlock current = reader.readBlock(offset, onDiskSize, false, true,
        false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey cacheKey = new BlockCacheKey(reader.getName(), offset);
      boolean isCached = blockCache.getBlock(cacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(current.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached + "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + current + "\n" +
          "blockCacheKey: " + cacheKey);
      }
      previous = current;
      offset += current.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:42,代码来源:TestCacheOnWriteInSchema.java

示例4: readStoreFile

import org.apache.hadoop.hbase.io.hfile.CacheConfig; //导入方法依赖的package包/类
/**
 * Walks every block of the store file at {@code path} and checks that each
 * block is in the block cache iff the cache-on-write type under test
 * ({@code cowType}) says it should have been cached when written.
 */
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache blockCache = cacheConf.getBlockCache();
  StoreFile storeFile = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, null);
  store.passSchemaMetricsTo(storeFile);
  HFileReaderV2 reader = (HFileReaderV2) storeFile.createReader().getHFileReader();
  try {
    // Open the scanner with read-side caching off so scanning cannot pollute the cache.
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Iterate all blocks up to the load-on-open section (cribbed from
    // io.hfile.TestCacheOnWrite).
    HFileBlock previous = null;
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // The previous block hints the on-disk size of the next one when available.
      long onDiskSize = (previous == null) ? -1 : previous.getNextBlockOnDiskSizeWithHeader();
      // Flags: don't cache the block, use pread, this is not a compaction.
      // A null expected block type skips type verification.
      HFileBlock current = reader.readBlock(offset, onDiskSize, false, true,
        false, null);
      BlockCacheKey cacheKey = new BlockCacheKey(reader.getName(), offset);
      boolean isCached = blockCache.getBlock(cacheKey, true, false) != null;
      boolean shouldBeCached = cowType.shouldBeCached(current.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached + "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + current + "\n" +
          "blockCacheKey: " + cacheKey);
      }
      previous = current;
      offset += current.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:43,代码来源:TestCacheOnWriteInSchema.java

示例5: readStoreFile

import org.apache.hadoop.hbase.io.hfile.CacheConfig; //导入方法依赖的package包/类
/**
 * Walks every block of the store file at {@code path}, asserting that a block
 * appears in the block cache exactly when the cache-on-write type under test
 * ({@code cowType}) says it should have been cached at write time.
 */
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache blockCache = cacheConf.getBlockCache();
  StoreFile storeFile = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL);
  HFileReaderV2 reader = (HFileReaderV2) storeFile.createReader().getHFileReader();
  try {
    // Read-side caching is disabled so this scan does not itself fill the cache.
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Iterate all blocks up to the load-on-open section (cribbed from
    // io.hfile.TestCacheOnWrite).
    HFileBlock previous = null;
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // The previous block hints the on-disk size of the next one when available.
      long onDiskSize = (previous == null) ? -1 : previous.getNextBlockOnDiskSizeWithHeader();
      // Flags: don't cache the block, use pread, this is not a compaction.
      // A null expected block type skips type verification.
      HFileBlock current = reader.readBlock(offset, onDiskSize, false, true,
        false, true, null);
      BlockCacheKey cacheKey = new BlockCacheKey(reader.getName(), offset);
      boolean isCached = blockCache.getBlock(cacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(current.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError(
          "shouldBeCached: " + shouldBeCached + "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + current + "\n" +
          "blockCacheKey: " + cacheKey);
      }
      previous = current;
      offset += current.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:42,代码来源:TestCacheOnWriteInSchema.java


注:本文中的org.apache.hadoop.hbase.io.hfile.CacheConfig.getBlockCache方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。