

Java SchemaMetrics.validateMetricChanges Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.validateMetricChanges. If you are unsure what SchemaMetrics.validateMetricChanges does, how to call it, or where to find real-world usage, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.


The following presents 8 code examples of the SchemaMetrics.validateMetricChanges method, sorted by popularity by default.
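All eight examples follow the same pattern: snapshot the schema metrics before the test body runs, then pass that snapshot to validateMetricChanges afterwards so that inconsistent metric deltas fail the test. For orientation, here is a minimal sketch of that pattern; the class name MetricsValidationSketch and the JUnit 4 scaffolding are illustrative assumptions, while getMetricsSnapshot and validateMetricChanges are the HBase 0.94 APIs used in the examples below (this is also where the startingMetrics field referenced by Examples 1, 2, 3, and 5 would come from).

import java.util.Map;

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.junit.After;
import org.junit.Before;

public class MetricsValidationSketch { // hypothetical test class, for illustration only

  private Map<String, Long> startingMetrics;

  @Before
  public void setUp() {
    // Snapshot all schema metrics so tearDown can diff against them.
    startingMetrics = SchemaMetrics.getMetricsSnapshot();
  }

  @After
  public void tearDown() {
    // Fails the test if metric changes since the snapshot are
    // internally inconsistent (e.g. per-CF vs. aggregate counters).
    SchemaMetrics.validateMetricChanges(startingMetrics);
  }
}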

Example 1: tearDown

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the package/class the method depends on
@Override
protected void tearDown() throws Exception {
  super.tearDown();
  EnvironmentEdgeManagerTestHelper.reset();
  SchemaMetrics.validateMetricChanges(startingMetrics);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 7, Source: TestHRegion.java

Example 2: tearDown

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the package/class the method depends on
@After
public void tearDown() throws Exception {
  TEST_UTIL.shutdownMiniCluster();
  SchemaMetrics.validateMetricChanges(startingMetrics);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 6, Source: TestRegionServerMetrics.java

Example 3: tearDown

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the package/class the method depends on
@Override
public void tearDown() throws Exception {
  super.tearDown();
  SchemaMetrics.validateMetricChanges(startingMetrics);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 6, Source: TestStoreFile.java

Example 4: testScannerSelection

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the package/class the method depends on
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  HColumnDescriptor hcd =
    new HColumnDescriptor(FAMILY_BYTES)
        .setMaxVersions(Integer.MAX_VALUE)
        .setTimeToLive(TTL_SECONDS);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(),
          conf, htd);

  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan();
  scan.setMaxVersions(Integer.MAX_VALUE);
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  Map<String, Long> metricsBeforeCompaction =
    SchemaMetrics.getMetricsSnapshot();

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles);
  } else {
    region.compactStores();
  }

  SchemaMetrics.validateMetricChanges(metricsBeforeCompaction);
  Map<String, Long> compactionMetrics =
      SchemaMetrics.diffMetrics(metricsBeforeCompaction,
          SchemaMetrics.getMetricsSnapshot());
  long compactionDataBlocksRead = SchemaMetrics.getLong(
      compactionMetrics,
      SchemaMetrics.getInstance(TABLE, FAMILY).getBlockMetricName(
          BlockCategory.DATA, true, BlockMetricType.READ_COUNT));
  assertEquals("Invalid number of blocks accessed during compaction. " +
      "We only expect non-expired files to be accessed.",
      numFreshFiles, compactionDataBlocksRead);
  region.close();
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 74, Source: TestScannerSelectionUsingTTL.java

Example 5: tearDown

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the package/class the method depends on
@After
public void tearDown() throws Exception {
  SchemaMetrics.validateMetricChanges(startingMetrics);
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 5, Source: TestHFileReaderV1.java

Example 6: testNotCachingDataBlocksDuringCompaction

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the package/class the method depends on
@Test
public void testNotCachingDataBlocksDuringCompaction() throws IOException {
  // TODO: need to change this test if we add a cache size threshold for
  // compactions, or if we implement some other kind of intelligent logic for
  // deciding what blocks to cache-on-write on compaction.
  final String table = "CompactionCacheOnWrite";
  final String cf = "myCF";
  final byte[] cfBytes = Bytes.toBytes(cf);
  final int maxVersions = 3;
  HRegion region = TEST_UTIL.createTestRegion(table, 
      new HColumnDescriptor(cf)
          .setCompressionType(compress)
          .setBloomFilterType(BLOOM_TYPE)
          .setMaxVersions(maxVersions)
          .setDataBlockEncoding(encoder.getEncodingInCache())
          .setEncodeOnDisk(encoder.getEncodingOnDisk() !=
              DataBlockEncoding.NONE)
  );
  int rowIdx = 0;
  long ts = EnvironmentEdgeManager.currentTimeMillis();
  for (int iFile = 0; iFile < 5; ++iFile) {
    for (int iRow = 0; iRow < 500; ++iRow) {
      String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + 
          iRow;
      Put p = new Put(Bytes.toBytes(rowStr));
      ++rowIdx;
      for (int iCol = 0; iCol < 10; ++iCol) {
        String qualStr = "col" + iCol;
        String valueStr = "value_" + rowStr + "_" + qualStr;
        for (int iTS = 0; iTS < 5; ++iTS) {
          p.add(cfBytes, Bytes.toBytes(qualStr), ts++,
              Bytes.toBytes(valueStr));
        }
      }
      region.put(p);
    }
    region.flushcache();
  }
  LruBlockCache blockCache =
      (LruBlockCache) new CacheConfig(conf).getBlockCache();
  blockCache.clearCache();
  assertEquals(0, blockCache.getBlockTypeCountsForTest().size());
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  region.compactStores();
  LOG.debug("compactStores() returned");
  SchemaMetrics.validateMetricChanges(metricsBefore);
  Map<String, Long> compactionMetrics = SchemaMetrics.diffMetrics(
      metricsBefore, SchemaMetrics.getMetricsSnapshot());
  LOG.debug(SchemaMetrics.formatMetrics(compactionMetrics));
  Map<BlockType, Integer> blockTypesInCache =
      blockCache.getBlockTypeCountsForTest();
  LOG.debug("Block types in cache: " + blockTypesInCache);
  assertNull(blockTypesInCache.get(BlockType.DATA));
  region.close();
  blockCache.shutdown();
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 57, Source: TestCacheOnWrite.java

Example 7: testScannerSelection

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the package/class the method depends on
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flushcache();
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  SchemaMetrics.validateMetricChanges(metricsBefore);
  InternalScanner scanner = region.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (scanner.next(results)) {
    // Drain the scanner; no rows fall within the requested key range.
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(accessedFiles.size(), 0);
  //assertEquals(cache.getBlockCount(), 0);
  Map<String, Long> diffMetrics = SchemaMetrics.diffMetrics(metricsBefore,
    SchemaMetrics.getMetricsSnapshot());
  SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE, FAMILY);
  long dataBlockRead = SchemaMetrics.getLong(diffMetrics,
    schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  assertEquals(dataBlockRead, 0);
  region.close();
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 47, Source: TestScannerSelectionUsingKeyRange.java

Example 8: testCacheBlocks

import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the package/class the method depends on
@Test
public void testCacheBlocks() throws IOException {
  // Set index block size to be the same as normal block size.
  TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY,
      BLOCK_SIZE);

  SchemaMetrics.setUseTableNameInTest(false);
  HColumnDescriptor hcd =
      new HColumnDescriptor(Bytes.toBytes(CF))
          .setMaxVersions(MAX_VERSIONS)
          .setCompressionType(COMPRESSION_ALGORITHM)
          .setBloomFilterType(BLOOM_TYPE);
  hcd.setBlocksize(BLOCK_SIZE);
  hcd.setBlockCacheEnabled(cfCacheEnabled);
  HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd);
  writeTestData(region);
  Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
  for (int i = 0; i < NUM_ROWS; ++i) {
    Get get = new Get(Bytes.toBytes("row" + i));
    region.get(get, null);
  }
  SchemaMetrics.validateMetricChanges(metricsBefore);
  Map<String, Long> metricsAfter = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> metricsDelta = SchemaMetrics.diffMetrics(metricsBefore,
      metricsAfter);
  SchemaMetrics metrics = SchemaMetrics.getInstance(TABLE, CF);
  List<BlockCategory> importantBlockCategories =
      new ArrayList<BlockCategory>();
  importantBlockCategories.add(BlockCategory.BLOOM);
  if (hfileVersion == 2) {
    // We only have index blocks for HFile v2.
    importantBlockCategories.add(BlockCategory.INDEX);
  }

  for (BlockCategory category : importantBlockCategories) {
    String hitsMetricName = getMetricName(metrics, category);
    assertTrue("Metric " + hitsMetricName + " was not incremented",
        metricsDelta.containsKey(hitsMetricName));
    long hits = metricsDelta.get(hitsMetricName);
    assertTrue("Invalid value of " + hitsMetricName + ": " + hits, hits > 0);
  }

  if (!cfCacheEnabled) {
    // Caching is turned off for the CF, so make sure we are not caching data
    // blocks.
    String dataHitMetricName = getMetricName(metrics, BlockCategory.DATA);
    assertFalse("Nonzero value for metric " + dataHitMetricName,
        metricsDelta.containsKey(dataHitMetricName));
  }
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 51, Source: TestForceCacheImportantBlocks.java


Note: The org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.validateMetricChanges examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.