

Java SchemaMetrics.getMetricsSnapshot Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.getMetricsSnapshot. If you are wondering how SchemaMetrics.getMetricsSnapshot is used in practice, or are looking for concrete examples of calling it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.


Below are 12 code examples of the SchemaMetrics.getMetricsSnapshot method, sorted by popularity by default.
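
Most of the examples below follow the same pattern: take a snapshot of the global metrics, run the workload under test (a scan, get, or compaction), take a second snapshot, and diff the two to isolate the counters that changed. The sketch below distills that pattern into one hypothetical helper; the method name countDataBlockReads and the Runnable-based workload parameter are illustrative and not taken from any of the listed projects, and the BlockCategory/BlockMetricType references assume the same HBase 0.94-era classpath as the test classes these examples come from.

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method

// Hypothetical helper illustrating the snapshot/diff pattern shared by the examples below.
private long countDataBlockReads(String table, String cf, Runnable workload) {
  // Snapshot the global metrics before the workload runs.
  Map<String, Long> before = SchemaMetrics.getMetricsSnapshot();

  workload.run(); // e.g. a scan, get, or compaction against the table

  // Snapshot again and keep only the counters that changed.
  Map<String, Long> diff =
      SchemaMetrics.diffMetrics(before, SchemaMetrics.getMetricsSnapshot());

  // Resolve the per-table/per-CF metric name and read its delta from the diff.
  SchemaMetrics cfMetrics = SchemaMetrics.getInstance(table, cf);
  return SchemaMetrics.getLong(diff,
      cfMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
}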

Example 1: _testBlocksScanned

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
  HRegion r = createNewHRegion(table, START_KEY, END_KEY,
      TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  r.flushcache();

  // Get the per-cf metrics
  SchemaMetrics schemaMetrics =
    SchemaMetrics.getInstance(Bytes.toString(table.getName()), Bytes.toString(FAMILY));
  Map<String, Long> schemaMetricSnapshot = SchemaMetrics.getMetricsSnapshot();

  // Do simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);

  InternalScanner s = r.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (s.next(results)); // drain the scanner, accumulating all KeyValues into results
  s.close();

  int expectResultSize = 'z' - 'a';
  Assert.assertEquals(expectResultSize, results.size());

  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) results.get(0).getLength());
  Assert.assertEquals(2, kvPerBlock);

  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;

  verifyDataAndIndexBlockRead(schemaMetricSnapshot, schemaMetrics,
      expectDataBlockRead, expectIndexBlockRead);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 34, Source: TestBlocksScanned.java

Example 2: verifyDataAndIndexBlockRead

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
private void verifyDataAndIndexBlockRead(Map<String, Long> previousMetricSnapshot,
    SchemaMetrics schemaMetrics, long expectDataBlockRead, long expectedIndexBlockRead){
  Map<String, Long> currentMetricsSnapshot = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> diffs =
    SchemaMetrics.diffMetrics(previousMetricSnapshot, currentMetricsSnapshot);

  long dataBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  long indexBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, BlockMetricType.READ_COUNT));

  Assert.assertEquals(expectDataBlockRead, dataBlockRead);
  Assert.assertEquals(expectedIndexBlockRead, indexBlockRead);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 15, Source: TestBlocksScanned.java

Example 3: setUp

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
@Before
public void setUp() throws IOException {
  startingMetrics = SchemaMetrics.getMetricsSnapshot();
  conf = TEST_UTIL.getConfiguration();
  fs = FileSystem.get(conf);
  SchemaMetrics.configureGlobally(conf);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 8, Source: TestHFileReaderV1.java

Example 4: testBlocksScanned

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
@Test
public void testBlocksScanned() throws Exception {
  HRegion r = createNewHRegion(TESTTABLEDESC, START_KEY, END_KEY,
      TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  r.flushcache();

  // Get the per-cf metrics
  SchemaMetrics schemaMetrics =
    SchemaMetrics.getInstance(Bytes.toString(TABLE), Bytes.toString(FAMILY));
  Map<String, Long> schemaMetricSnapshot = SchemaMetrics.getMetricsSnapshot();

  // Do simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);

  InternalScanner s = r.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (s.next(results)); // drain the scanner, accumulating all KeyValues into results
  s.close();

  int expectResultSize = 'z' - 'a';
  Assert.assertEquals(expectResultSize, results.size());

  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) results.get(0).getLength());
  Assert.assertEquals(2, kvPerBlock);

  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;

  verifyDataAndIndexBlockRead(schemaMetricSnapshot, schemaMetrics,
      expectDataBlockRead, expectIndexBlockRead);
}
 
Developer: zwqjsj0404, Project: HBase-Research, Lines: 35, Source: TestBlocksScanned.java

Example 5: setUp

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
/**
 * @see org.apache.hadoop.hbase.HBaseTestCase#setUp()
 */
@Override
protected void setUp() throws Exception {
  startingMetrics = SchemaMetrics.getMetricsSnapshot();
  super.setUp();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 9, Source: TestHRegion.java

Example 6: setUp

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
@Before
public void setUp() throws Exception {
  SchemaMetrics.setUseTableNameInTest(true);
  startingMetrics = SchemaMetrics.getMetricsSnapshot();
  TEST_UTIL.startMiniCluster();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 7, Source: TestRegionServerMetrics.java

Example 7: setUp

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
@Override
public void setUp() throws Exception {
  super.setUp();
  startingMetrics = SchemaMetrics.getMetricsSnapshot();
  ROOT_DIR = new Path(this.testDir, "TestStoreFile").toString();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 7, Source: TestStoreFile.java

Example 8: testScannerSelection

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  HColumnDescriptor hcd =
    new HColumnDescriptor(FAMILY_BYTES)
        .setMaxVersions(Integer.MAX_VALUE)
        .setTimeToLive(TTL_SECONDS);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(),
          conf, htd);

  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan();
  scan.setMaxVersions(Integer.MAX_VALUE);
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  Map<String, Long> metricsBeforeCompaction =
    SchemaMetrics.getMetricsSnapshot();

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles);
  } else {
    region.compactStores();
  }

  SchemaMetrics.validateMetricChanges(metricsBeforeCompaction);
  Map<String, Long> compactionMetrics =
      SchemaMetrics.diffMetrics(metricsBeforeCompaction,
          SchemaMetrics.getMetricsSnapshot());
  long compactionDataBlocksRead = SchemaMetrics.getLong(
      compactionMetrics,
      SchemaMetrics.getInstance(TABLE, FAMILY).getBlockMetricName(
          BlockCategory.DATA, true, BlockMetricType.READ_COUNT));
  assertEquals("Invalid number of blocks accessed during compaction. " +
      "We only expect non-expired files to be accessed.",
      numFreshFiles, compactionDataBlocksRead);
  region.close();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 74, Source: TestScannerSelectionUsingTTL.java

Example 9: testNotCachingDataBlocksDuringCompaction

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
@Test
public void testNotCachingDataBlocksDuringCompaction() throws IOException {
  // TODO: need to change this test if we add a cache size threshold for
  // compactions, or if we implement some other kind of intelligent logic for
  // deciding what blocks to cache-on-write on compaction.
  final String table = "CompactionCacheOnWrite";
  final String cf = "myCF";
  final byte[] cfBytes = Bytes.toBytes(cf);
  final int maxVersions = 3;
  HRegion region = TEST_UTIL.createTestRegion(table, 
      new HColumnDescriptor(cf)
          .setCompressionType(compress)
          .setBloomFilterType(BLOOM_TYPE)
          .setMaxVersions(maxVersions)
          .setDataBlockEncoding(encoder.getEncodingInCache())
          .setEncodeOnDisk(encoder.getEncodingOnDisk() !=
              DataBlockEncoding.NONE)
  );
  int rowIdx = 0;
  long ts = EnvironmentEdgeManager.currentTimeMillis();
  for (int iFile = 0; iFile < 5; ++iFile) {
    for (int iRow = 0; iRow < 500; ++iRow) {
      String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + 
          iRow;
      Put p = new Put(Bytes.toBytes(rowStr));
      ++rowIdx;
      for (int iCol = 0; iCol < 10; ++iCol) {
        String qualStr = "col" + iCol;
        String valueStr = "value_" + rowStr + "_" + qualStr;
        for (int iTS = 0; iTS < 5; ++iTS) {
          p.add(cfBytes, Bytes.toBytes(qualStr), ts++,
              Bytes.toBytes(valueStr));
        }
      }
      region.put(p);
    }
    region.flushcache();
  }
  LruBlockCache blockCache =
      (LruBlockCache) new CacheConfig(conf).getBlockCache();
  blockCache.clearCache();
  assertEquals(0, blockCache.getBlockTypeCountsForTest().size());
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  region.compactStores();
  LOG.debug("compactStores() returned");
  SchemaMetrics.validateMetricChanges(metricsBefore);
  Map<String, Long> compactionMetrics = SchemaMetrics.diffMetrics(
      metricsBefore, SchemaMetrics.getMetricsSnapshot());
  LOG.debug(SchemaMetrics.formatMetrics(compactionMetrics));
  Map<BlockType, Integer> blockTypesInCache =
      blockCache.getBlockTypeCountsForTest();
  LOG.debug("Block types in cache: " + blockTypesInCache);
  assertNull(blockTypesInCache.get(BlockType.DATA));
  region.close();
  blockCache.shutdown();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 57, Source: TestCacheOnWrite.java

Example 10: testScannerSelection

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  SchemaMetrics.validateMetricChanges(metricsBefore);
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(accessedFiles.size(), 0);
  //assertEquals(cache.getBlockCount(), 0);
  Map<String, Long> diffMetrics = SchemaMetrics.diffMetrics(metricsBefore,
    SchemaMetrics.getMetricsSnapshot());
  SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE, FAMILY);
  long dataBlockRead = SchemaMetrics.getLong(diffMetrics,
    schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  assertEquals(dataBlockRead, 0);
  region.close();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 47, Source: TestScannerSelectionUsingKeyRange.java

Example 11: testCacheBlocks

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
@Test
public void testCacheBlocks() throws IOException {
  // Set index block size to be the same as normal block size.
  TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY,
      BLOCK_SIZE);

  SchemaMetrics.setUseTableNameInTest(false);
  HColumnDescriptor hcd =
      new HColumnDescriptor(Bytes.toBytes(CF))
          .setMaxVersions(MAX_VERSIONS)
          .setCompressionType(COMPRESSION_ALGORITHM)
          .setBloomFilterType(BLOOM_TYPE);
  hcd.setBlocksize(BLOCK_SIZE);
  hcd.setBlockCacheEnabled(cfCacheEnabled);
  HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd);
  writeTestData(region);
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  for (int i = 0; i < NUM_ROWS; ++i) {
    Get get = new Get(Bytes.toBytes("row" + i));
    region.get(get, null);
  }
  SchemaMetrics.validateMetricChanges(metricsBefore);
  Map<String, Long> metricsAfter = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> metricsDelta = SchemaMetrics.diffMetrics(metricsBefore,
      metricsAfter);
  SchemaMetrics metrics = SchemaMetrics.getInstance(TABLE, CF);
  List<BlockCategory> importantBlockCategories =
      new ArrayList<BlockCategory>();
  importantBlockCategories.add(BlockCategory.BLOOM);
  if (hfileVersion == 2) {
    // We only have index blocks for HFile v2.
    importantBlockCategories.add(BlockCategory.INDEX);
  }

  for (BlockCategory category : importantBlockCategories) {
    String hitsMetricName = getMetricName(metrics, category);
    assertTrue("Metric " + hitsMetricName + " was not incremented",
        metricsDelta.containsKey(hitsMetricName));
    long hits = metricsDelta.get(hitsMetricName);
    assertTrue("Invalid value of " + hitsMetricName + ": " + hits, hits > 0);
  }

  if (!cfCacheEnabled) {
    // Caching is turned off for the CF, so make sure we are not caching data
    // blocks.
    String dataHitMetricName = getMetricName(metrics, BlockCategory.DATA);
    assertFalse("Nonzero value for metric " + dataHitMetricName,
        metricsDelta.containsKey(dataHitMetricName));
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 51, Source: TestForceCacheImportantBlocks.java

Example 12: setUp

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the class required by this method
@Before
public void setUp() throws Exception {
  startingMetrics = SchemaMetrics.getMetricsSnapshot();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 5, Source: TestLruBlockCache.java


Note: The org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.getMetricsSnapshot examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the source code copyright belongs to the original authors. Please consult the corresponding project's license before redistributing or reusing the code; do not reproduce without permission.