当前位置: 首页>>代码示例>>Java>>正文


Java SchemaMetrics.getInstance方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.getInstance方法的典型用法代码示例。如果您正苦于以下问题:Java SchemaMetrics.getInstance方法的具体用法?Java SchemaMetrics.getInstance怎么用?Java SchemaMetrics.getInstance使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics的用法示例。


在下文中一共展示了SchemaMetrics.getInstance方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testMultipleRegions

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
@Test
public void testMultipleRegions() throws IOException, InterruptedException {

  // Build a random table with a known number of regions, flushes, and CFs.
  TEST_UTIL.createRandomTable(
      TABLE_NAME,
      Arrays.asList(FAMILIES),
      MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);

  final HRegionServer regionServer =
      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);

  // The single RS should host all user regions plus META and ROOT.
  assertEquals(NUM_REGIONS + META_AND_ROOT,
      regionServer.getOnlineRegions().size());

  regionServer.doMetrics();

  // Log the store files backing each column family of each region.
  for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(
      Bytes.toBytes(TABLE_NAME))) {
    for (Map.Entry<byte[], Store> cfToStore : region.getStores().entrySet()) {
      LOG.info("For region " + region.getRegionNameAsString() + ", CF " +
          Bytes.toStringBinary(cfToStore.getKey()) + " found store files " +
          ": " + cfToStore.getValue().getStorefiles());
    }
  }

  // Aggregate store-file count over all tables and CFs: one file per flush
  // per region per CF, plus the META and ROOT store files.
  assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS * FAMILIES.length
      + META_AND_ROOT, ALL_METRICS, StoreMetricType.STORE_FILE_COUNT);

  // Per-CF store-file count for each family of the test table.
  for (String family : FAMILIES) {
    SchemaMetrics cfMetrics = SchemaMetrics.getInstance(TABLE_NAME, family);
    assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS, cfMetrics,
        StoreMetricType.STORE_FILE_COUNT);
  }

  // ensure that the max value is also maintained
  final String maxMetricName = ALL_METRICS
      .getStoreMetricNameMax(StoreMetricType.STORE_FILE_COUNT);
  assertEquals("Invalid value for store metric " + maxMetricName,
      NUM_FLUSHES, RegionMetricsStorage.getNumericMetric(maxMetricName));
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:39,代码来源:TestRegionServerMetrics.java

示例2: _testBlocksScanned

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Scans a key range of a freshly flushed region and verifies that the
 * per-CF data/index block read counters advanced by the expected amount.
 *
 * @param table descriptor of the table to create the test region for
 * @throws Exception on region creation, scan, or metric-verification failure
 */
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
  HRegion r = createNewHRegion(table, START_KEY, END_KEY,
      TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  // Flush so the subsequent scan reads from store files, not the memstore.
  r.flushcache();

  // Get the per-cf metrics
  SchemaMetrics schemaMetrics =
    SchemaMetrics.getInstance(Bytes.toString(table.getName()), Bytes.toString(FAMILY));
  Map<String, Long> schemaMetricSnapshot = SchemaMetrics.getMetricsSnapshot();

  // Do simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);

  InternalScanner s = r.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  try {
    // next() appends matching KVs to 'results' and returns false when the
    // scan is exhausted; all accumulation happens inside the call.
    while (s.next(results)) {
      // intentionally empty — draining the scanner
    }
  } finally {
    // Close even if next() throws so scanner resources are released.
    s.close();
  }

  int expectResultSize = 'z' - 'a';
  Assert.assertEquals(expectResultSize, results.size());

  // All KVs are the same size, so block capacity = ceil(blockSize / kvSize).
  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) results.get(0).getLength());
  Assert.assertEquals(2, kvPerBlock);

  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;

  verifyDataAndIndexBlockRead(schemaMetricSnapshot, schemaMetrics,
      expectDataBlockRead, expectIndexBlockRead);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:34,代码来源:TestBlocksScanned.java

示例3: testBlocksScanned

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Scans a key range of a freshly flushed region and verifies that the
 * per-CF data/index block read counters advanced by the expected amount.
 *
 * @throws Exception on region creation, scan, or metric-verification failure
 */
@Test
public void testBlocksScanned() throws Exception {
  HRegion r = createNewHRegion(TESTTABLEDESC, START_KEY, END_KEY,
      TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  // Flush so the subsequent scan reads from store files, not the memstore.
  r.flushcache();

  // Get the per-cf metrics
  SchemaMetrics schemaMetrics =
    SchemaMetrics.getInstance(Bytes.toString(TABLE), Bytes.toString(FAMILY));
  Map<String, Long> schemaMetricSnapshot = SchemaMetrics.getMetricsSnapshot();

  // Do simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);

  InternalScanner s = r.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  try {
    // next() appends matching KVs to 'results' and returns false when the
    // scan is exhausted; all accumulation happens inside the call.
    while (s.next(results)) {
      // intentionally empty — draining the scanner
    }
  } finally {
    // Close even if next() throws so scanner resources are released.
    s.close();
  }

  int expectResultSize = 'z' - 'a';
  Assert.assertEquals(expectResultSize, results.size());

  // All KVs are the same size, so block capacity = ceil(blockSize / kvSize).
  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) results.get(0).getLength());
  Assert.assertEquals(2, kvPerBlock);

  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;

  verifyDataAndIndexBlockRead(schemaMetricSnapshot, schemaMetrics,
      expectDataBlockRead, expectIndexBlockRead);
}
 
开发者ID:zwqjsj0404,项目名称:HBase-Research,代码行数:35,代码来源:TestBlocksScanned.java

示例4: testScannerSelection

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Writes several store files, scans a key range that matches no rows, and
 * verifies that key-range scanner selection skipped every file: no rows
 * returned, nothing cached, and no data blocks read.
 *
 * @throws IOException on region creation, write, or scan failure
 */
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Effectively disable compactions so every flushed file stays around.
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(), conf, htd);

  try {
    // Produce NUM_FILES store files, each covering all NUM_ROWS rows.
    for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
      for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
        Put put = new Put(Bytes.toBytes("row" + iRow));
        for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
          put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
              Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
        }
        region.put(put);
      }
      region.flushcache();
    }

    // The range "aaa".."aaz" precedes every written row key ("row...").
    Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
    CacheConfig cacheConf = new CacheConfig(conf);
    LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
    cache.clearCache();
    Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
    SchemaMetrics.validateMetricChanges(metricsBefore);
    InternalScanner scanner = region.getScanner(scan);
    List<KeyValue> results = new ArrayList<KeyValue>();
    try {
      while (scanner.next(results)) {
      }
    } finally {
      // Close even if next() throws so scanner resources are released.
      scanner.close();
    }
    assertEquals(0, results.size());
    // Scanner selection should have rejected every store file, so nothing
    // was brought into the block cache. Note: JUnit's assertEquals takes
    // (expected, actual) — the constant goes first.
    Set<String> accessedFiles = cache.getCachedFileNamesForTest();
    assertEquals(0, accessedFiles.size());
    //assertEquals(cache.getBlockCount(), 0);
    Map<String, Long> diffMetrics = SchemaMetrics.diffMetrics(metricsBefore,
      SchemaMetrics.getMetricsSnapshot());
    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE, FAMILY);
    long dataBlockRead = SchemaMetrics.getLong(diffMetrics,
      schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
    assertEquals(0, dataBlockRead);
  } finally {
    // Release region resources even when an assertion above fails.
    region.close();
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:47,代码来源:TestScannerSelectionUsingKeyRange.java

示例5: testCacheBlocks

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
@Test
public void testCacheBlocks() throws IOException {
  // Set index block size to be the same as normal block size.
  TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY,
      BLOCK_SIZE);

  SchemaMetrics.setUseTableNameInTest(false);

  // Column family configured with the parameterized cache-enabled flag.
  HColumnDescriptor familyDescriptor =
      new HColumnDescriptor(Bytes.toBytes(CF))
          .setMaxVersions(MAX_VERSIONS)
          .setCompressionType(COMPRESSION_ALGORITHM)
          .setBloomFilterType(BLOOM_TYPE);
  familyDescriptor.setBlocksize(BLOCK_SIZE);
  familyDescriptor.setBlockCacheEnabled(cfCacheEnabled);

  HRegion region = TEST_UTIL.createTestRegion(TABLE, familyDescriptor);
  writeTestData(region);

  // Snapshot the metrics, read every row back, then diff the snapshots.
  Map<String, Long> before = SchemaMetrics.getMetricsSnapshot();
  for (int row = 0; row < NUM_ROWS; ++row) {
    region.get(new Get(Bytes.toBytes("row" + row)), null);
  }
  SchemaMetrics.validateMetricChanges(before);
  Map<String, Long> delta =
      SchemaMetrics.diffMetrics(before, SchemaMetrics.getMetricsSnapshot());

  SchemaMetrics metrics = SchemaMetrics.getInstance(TABLE, CF);

  // Bloom blocks always matter; index blocks only exist for HFile v2.
  List<BlockCategory> importantBlockCategories =
      new ArrayList<BlockCategory>();
  importantBlockCategories.add(BlockCategory.BLOOM);
  if (hfileVersion == 2) {
    // We only have index blocks for HFile v2.
    importantBlockCategories.add(BlockCategory.INDEX);
  }

  // Each important category must have recorded at least one hit.
  for (BlockCategory category : importantBlockCategories) {
    String hitsMetricName = getMetricName(metrics, category);
    assertTrue("Metric " + hitsMetricName + " was not incremented",
        delta.containsKey(hitsMetricName));
    long hits = delta.get(hitsMetricName);
    assertTrue("Invalid value of " + hitsMetricName + ": " + hits, hits > 0);
  }

  if (!cfCacheEnabled) {
    // Caching is turned off for the CF, so make sure we are not caching data
    // blocks.
    String dataHitMetricName = getMetricName(metrics, BlockCategory.DATA);
    assertFalse("Nonzero value for metric " + dataHitMetricName,
        delta.containsKey(dataHitMetricName));
  }
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:51,代码来源:TestForceCacheImportantBlocks.java


注:本文中的org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.getInstance方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。