This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics. If you are wondering what SchemaMetrics is for, how to use it, or where to find examples of it in practice, the curated code samples below should help.
The SchemaMetrics class belongs to the org.apache.hadoop.hbase.regionserver.metrics package. Fifteen code examples of the class are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: HFileWriterV2
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
    Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator, final ChecksumType checksumType,
    final int bytesPerChecksum, boolean includeMVCCReadpoint) throws IOException {
  super(cacheConf,
      ostream == null ? createOutputStream(conf, fs, path) : ostream,
      path, blockSize, compressAlgo, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.includeMemstoreTS = includeMVCCReadpoint;
  if (!conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false)) {
    this.minorVersion = 0;
  }
  finishInit(conf);
}
Example 2: assertTimeVaryingMetricCount
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
private void assertTimeVaryingMetricCount(int expectedCount, String table, String cf,
    String regionName, String metricPrefix) {
  Integer expectedCountInteger = new Integer(expectedCount);
  if (cf != null) {
    String cfKey =
        SchemaMetrics.TABLE_PREFIX + table + "." + SchemaMetrics.CF_PREFIX + cf + "."
            + metricPrefix;
    Pair<Long, Integer> cfPair = RegionMetricsStorage.getTimeVaryingMetric(cfKey);
    assertEquals(expectedCountInteger, cfPair.getSecond());
  }
  if (regionName != null) {
    String rKey =
        SchemaMetrics.TABLE_PREFIX + table + "." + SchemaMetrics.REGION_PREFIX + regionName + "."
            + metricPrefix;
    Pair<Long, Integer> regionPair = RegionMetricsStorage.getTimeVaryingMetric(rKey);
    assertEquals(expectedCountInteger, regionPair.getSecond());
  }
}
Example 3: assertSizeMetric
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
private void assertSizeMetric(String table, String[] cfs, int[] metrics) {
  // we have getsize & nextsize for each column family
  assertEquals(cfs.length * 2, metrics.length);
  for (int i = 0; i < cfs.length; ++i) {
    String prefix = SchemaMetrics.generateSchemaMetricsPrefix(table, cfs[i]);
    String getMetric = prefix + SchemaMetrics.METRIC_GETSIZE;
    String nextMetric = prefix + SchemaMetrics.METRIC_NEXTSIZE;
    // verify that getsize and nextsize match the expected values
    int getSize = RegionMetricsStorage.getNumericMetrics().containsKey(getMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(getMetric).intValue() : 0;
    int nextSize = RegionMetricsStorage.getNumericMetrics().containsKey(nextMetric) ?
        RegionMetricsStorage.getNumericMetrics().get(nextMetric).intValue() : 0;
    assertEquals(metrics[i], getSize);
    assertEquals(metrics[cfs.length + i], nextSize);
  }
}
Example 4: StoreFile
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
/**
 * Constructor; loads a reader and its indices, etc. May allocate a substantial amount of RAM
 * depending on the underlying files (10-20MB?).
 * @param fs The current file system to use.
 * @param p The path of the file.
 * @param conf The current configuration.
 * @param cacheConf The cache configuration and block cache reference.
 * @param cfBloomType The bloom type to use for this store file as specified by column family
 *          configuration. This may or may not be the same as the Bloom filter type actually
 *          present in the HFile, because column family configuration might change. If this is
 *          {@link BloomType#NONE}, the existing Bloom filter is ignored.
 * @param dataBlockEncoder data block encoding algorithm.
 * @throws IOException When opening the reader fails.
 */
public StoreFile(final FileSystem fs, final Path p, final Configuration conf,
    final CacheConfig cacheConf, final BloomType cfBloomType,
    final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
  this.fs = fs;
  this.path = p;
  this.cacheConf = cacheConf;
  this.dataBlockEncoder =
      dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE : dataBlockEncoder;
  if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
    this.cfBloomType = cfBloomType;
  } else {
    LOG.info("Ignoring bloom filter check for file " + path + ": cfBloomType=" + cfBloomType
        + " (disabled in config)");
    this.cfBloomType = BloomType.NONE;
  }
  // cache the modification time stamp of this store file
  FileStatus[] stats = FSUtils.listStatus(fs, p, null);
  if (stats != null && stats.length == 1) {
    this.modificationTimeStamp = stats[0].getModificationTime();
  } else {
    this.modificationTimeStamp = 0;
  }
  SchemaMetrics.configureGlobally(conf);
  initPossibleIndexesAndReference(fs, p, conf);
}
Example 5: initializeMetricNames
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
/**
 * Method used internally to initialize metric names throughout the constructors. To be called
 * after the store variable has been initialized!
 */
private void initializeMetricNames() {
  String tableName = SchemaMetrics.UNKNOWN;
  String family = SchemaMetrics.UNKNOWN;
  if (store != null) {
    tableName = store.getTableName();
    family = Bytes.toString(store.getFamily().getName());
  }
  this.metricNamePrefix = SchemaMetrics.generateSchemaMetricsPrefix(tableName, family);
}
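The prefix produced by generateSchemaMetricsPrefix is what Examples 2 and 3 concatenate metric names onto. A minimal sketch of how such a prefix is typically composed, assuming TABLE_PREFIX and CF_PREFIX resolve to "tbl." and "cf." (those literal values are not shown in the examples above, and the table/family names are purely hypothetical):
String prefix = SchemaMetrics.generateSchemaMetricsPrefix("usertable", "cf1");
// Assumption: the result looks like "tbl.usertable.cf.cf1.", so a full metric key
// is formed by appending a metric name, as in Example 3:
String getSizeKey = prefix + SchemaMetrics.METRIC_GETSIZE;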
Example 6: HFileWriterV1
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
/** Constructor that takes a path, creates and closes the output stream. */
public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
    FileSystem fs, Path path, FSDataOutputStream ostream,
    int blockSize, Compression.Algorithm compress,
    HFileDataBlockEncoder blockEncoder,
    final KeyComparator comparator) throws IOException {
  super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
      blockSize, compress, blockEncoder, comparator);
  SchemaMetrics.configureGlobally(conf);
}
Example 7: getWriterFactory
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
/**
 * Returns the factory to be used to create {@link HFile} writers.
 */
public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) {
  SchemaMetrics.configureGlobally(conf);
  int version = getFormatVersion(conf);
  switch (version) {
    case 1:
      return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
    case 2:
      return new HFileWriterV2.WriterFactoryV2(conf, cacheConf);
    default:
      throw new IllegalArgumentException("Cannot create writer for HFile format version "
          + version);
  }
}
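As a usage note, getWriterFactory selects the writer implementation from the configured HFile format version and also wires up SchemaMetrics as a side effect. A minimal sketch of obtaining a factory, assuming the version is read from the "hfile.format.version" configuration key (the exact key consulted by getFormatVersion is not shown above):
Configuration conf = HBaseConfiguration.create();
conf.setInt("hfile.format.version", 2); // assumption: the key read by getFormatVersion(conf)
HFile.WriterFactory factory = HFile.getWriterFactory(conf, new CacheConfig(conf));
// SchemaMetrics.configureGlobally(conf) has already run inside getWriterFactory at this point.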
Example 8: updateSizeMetrics
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
/**
 * Helper function that updates the local size counter and also updates any per-CF or
 * per-block-type metrics it can discern from the given {@link CachedBlock}.
 *
 * @param cb the cached block whose heap size is being accounted for
 * @param evict whether the block is being evicted (its size is subtracted) rather than cached
 */
protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
  long heapsize = cb.heapSize();
  if (evict) {
    heapsize *= -1;
  }
  Cacheable cachedBlock = cb.getBuffer();
  SchemaMetrics schemaMetrics = cachedBlock.getSchemaMetrics();
  if (schemaMetrics != null) {
    schemaMetrics.updateOnCachePutOrEvict(
        cachedBlock.getBlockType().getCategory(), heapsize, evict);
  }
  return size.addAndGet(heapsize);
}
Example 9: testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
/**
 * Test case that checks whether the region-initialization task state is set to ABORTED when an
 * exception is thrown during initialization.
 *
 * @throws Exception
 */
@Test
public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() throws Exception {
  HRegionInfo info = null;
  try {
    FileSystem fs = Mockito.mock(FileSystem.class);
    Mockito.when(fs.exists((Path) Mockito.anyObject())).thenThrow(new IOException());
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("cf"));
    info = new HRegionInfo(htd.getName(), HConstants.EMPTY_BYTE_ARRAY,
        HConstants.EMPTY_BYTE_ARRAY, false);
    Path path = new Path(DIR + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization");
    // Nowhere in this test do we instantiate an HStore, so useTableNameGlobally stays null.
    // To avoid a NullPointerException we set it to false explicitly.
    SchemaMetrics.setUseTableNameInTest(false);
    region = HRegion.newHRegion(path, null, fs, conf, info, htd, null);
    // Region initialization throws an IOException and sets the task state to ABORTED.
    region.initialize();
    fail("Region initialization should fail due to IOException");
  } catch (IOException io) {
    List<MonitoredTask> tasks = TaskMonitor.get().getTasks();
    for (MonitoredTask monitoredTask : tasks) {
      if (!(monitoredTask instanceof MonitoredRPCHandler)
          && monitoredTask.getDescription().contains(region.toString())) {
        assertTrue("Region state should be ABORTED.",
            monitoredTask.getState().equals(MonitoredTask.State.ABORTED));
        break;
      }
    }
  } finally {
    HRegion.closeHRegion(region);
  }
}
Example 10: assertStoreMetricEquals
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
private void assertStoreMetricEquals(long expected,
    SchemaMetrics schemaMetrics, StoreMetricType storeMetricType) {
  final String storeMetricName =
      schemaMetrics.getStoreMetricName(storeMetricType);
  Long startValue = startingMetrics.get(storeMetricName);
  assertEquals("Invalid value for store metric " + storeMetricName
      + " (type " + storeMetricType + ")", expected,
      RegionMetricsStorage.getNumericMetric(storeMetricName)
          - (startValue != null ? startValue : 0));
}
Example 11: testMultipleRegions
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
@Test
public void testMultipleRegions() throws IOException, InterruptedException {
  TEST_UTIL.createRandomTable(
      TABLE_NAME,
      Arrays.asList(FAMILIES),
      MAX_VERSIONS, NUM_COLS_PER_ROW, NUM_FLUSHES, NUM_REGIONS, 1000);
  final HRegionServer rs =
      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  assertEquals(NUM_REGIONS + META_AND_ROOT, rs.getOnlineRegions().size());
  rs.doMetrics();
  for (HRegion r : TEST_UTIL.getMiniHBaseCluster().getRegions(
      Bytes.toBytes(TABLE_NAME))) {
    for (Map.Entry<byte[], Store> storeEntry : r.getStores().entrySet()) {
      LOG.info("For region " + r.getRegionNameAsString() + ", CF " +
          Bytes.toStringBinary(storeEntry.getKey()) + " found store files: " +
          storeEntry.getValue().getStorefiles());
    }
  }
  assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS * FAMILIES.length
      + META_AND_ROOT, ALL_METRICS, StoreMetricType.STORE_FILE_COUNT);
  for (String cf : FAMILIES) {
    SchemaMetrics schemaMetrics = SchemaMetrics.getInstance(TABLE_NAME, cf);
    assertStoreMetricEquals(NUM_FLUSHES * NUM_REGIONS, schemaMetrics,
        StoreMetricType.STORE_FILE_COUNT);
  }
  // ensure that the max value is also maintained
  final String storeMetricName = ALL_METRICS
      .getStoreMetricNameMax(StoreMetricType.STORE_FILE_COUNT);
  assertEquals("Invalid value for store metric " + storeMetricName,
      NUM_FLUSHES, RegionMetricsStorage.getNumericMetric(storeMetricName));
}
Example 12: _testBlocksScanned
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
  HRegion r = createNewHRegion(table, START_KEY, END_KEY,
      TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  r.flushcache();
  // Get the per-CF metrics
  SchemaMetrics schemaMetrics =
      SchemaMetrics.getInstance(Bytes.toString(table.getName()), Bytes.toString(FAMILY));
  Map<String, Long> schemaMetricSnapshot = SchemaMetrics.getMetricsSnapshot();
  // Do a simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);
  InternalScanner s = r.getScanner(scan);
  List<KeyValue> results = new ArrayList<KeyValue>();
  while (s.next(results));  // drain the scanner
  s.close();
  int expectResultSize = 'z' - 'a';
  Assert.assertEquals(expectResultSize, results.size());
  int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) results.get(0).getLength());
  Assert.assertEquals(2, kvPerBlock);
  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;
  verifyDataAndIndexBlockRead(schemaMetricSnapshot, schemaMetrics,
      expectDataBlockRead, expectIndexBlockRead);
}
Example 13: verifyDataAndIndexBlockRead
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
private void verifyDataAndIndexBlockRead(Map<String, Long> previousMetricSnapshot,
    SchemaMetrics schemaMetrics, long expectDataBlockRead, long expectedIndexBlockRead) {
  Map<String, Long> currentMetricsSnapshot = SchemaMetrics.getMetricsSnapshot();
  Map<String, Long> diffs =
      SchemaMetrics.diffMetrics(previousMetricSnapshot, currentMetricsSnapshot);
  long dataBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));
  long indexBlockRead = SchemaMetrics.getLong(diffs,
      schemaMetrics.getBlockMetricName(BlockCategory.INDEX, false, BlockMetricType.READ_COUNT));
  Assert.assertEquals(expectDataBlockRead, dataBlockRead);
  Assert.assertEquals(expectedIndexBlockRead, indexBlockRead);
}
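Examples 12 and 13 together illustrate the snapshot-and-diff pattern for per-CF block metrics. A condensed sketch of that pattern, using only SchemaMetrics calls that already appear above (table and family names are hypothetical):
SchemaMetrics cfMetrics = SchemaMetrics.getInstance("usertable", "cf1");
Map<String, Long> before = SchemaMetrics.getMetricsSnapshot();
// ... perform the reads to be measured ...
Map<String, Long> diffs = SchemaMetrics.diffMetrics(before, SchemaMetrics.getMetricsSnapshot());
long dataBlocksRead = SchemaMetrics.getLong(diffs,
    cfMetrics.getBlockMetricName(BlockCategory.DATA, false, BlockMetricType.READ_COUNT));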
Example 14: setUp
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
@Override
public void setUp() throws Exception {
  super.setUp();
  this.mvcc = new MultiVersionConsistencyControl();
  this.memstore = new MemStore();
  SchemaMetrics.setUseTableNameInTest(false);
}
Example 15: setUp
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; // import the required package/class
@Before
public void setUp() throws IOException {
  startingMetrics = SchemaMetrics.getMetricsSnapshot();
  conf = TEST_UTIL.getConfiguration();
  fs = FileSystem.get(conf);
  SchemaMetrics.configureGlobally(conf);
}
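The startingMetrics snapshot captured here is the baseline that Example 10 subtracts from the current counter values. If the SchemaMetrics build in use also provides a validateMetricChanges helper (an assumption; it does not appear in the examples above), a teardown can additionally cross-check that per-CF and aggregate counters stayed consistent:
@After
public void tearDown() {
  // Assumption: validateMetricChanges exists in this SchemaMetrics version and
  // verifies that the metric deltas since the snapshot are internally consistent.
  SchemaMetrics.validateMetricChanges(startingMetrics);
}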