本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.setUseTableNameInTest方法的典型用法代码示例。如果您正苦于以下问题:Java SchemaMetrics.setUseTableNameInTest方法的具体用法?Java SchemaMetrics.setUseTableNameInTest怎么用?Java SchemaMetrics.setUseTableNameInTest使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics
的用法示例。
在下文中一共展示了SchemaMetrics.setUseTableNameInTest方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Verifies that the MonitoredTask tracking region initialization is moved to
 * the ABORTED state when initialization fails with an exception.
 *
 * <p>A mocked FileSystem throws IOException from exists(), which makes
 * HRegion.initialize() fail; the test then scans the TaskMonitor for the task
 * describing this region and asserts it ended in the ABORTED state.
 *
 * @throws Exception if setup fails for a reason other than the expected IOException
 */
@Test
public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() throws Exception {
HRegionInfo info = null;
try {
// Any filesystem existence check throws, so region initialization cannot succeed.
FileSystem fs = Mockito.mock(FileSystem.class);
Mockito.when(fs.exists((Path) Mockito.anyObject())).thenThrow(new IOException());
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("cf"));
info = new HRegionInfo(htd.getName(), HConstants.EMPTY_BYTE_ARRAY,
HConstants.EMPTY_BYTE_ARRAY, false);
Path path = new Path(DIR + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization");
// Nowhere in this test case is an HStore instantiated, so useTableNameGlobally would be
// null; set it explicitly to false to avoid a NullPointerException in SchemaMetrics.
SchemaMetrics.setUseTableNameInTest(false);
region = HRegion.newHRegion(path, null, fs, conf, info, htd, null);
// Region initialization throws IOException and sets the task state to ABORTED.
region.initialize();
fail("Region initialization should fail due to IOException");
} catch (IOException io) {
// Expected path: locate the monitored task for this region and check its state.
List<MonitoredTask> tasks = TaskMonitor.get().getTasks();
for (MonitoredTask monitoredTask : tasks) {
if (!(monitoredTask instanceof MonitoredRPCHandler)
&& monitoredTask.getDescription().contains(region.toString())) {
assertTrue("Region state should be ABORTED.",
monitoredTask.getState().equals(MonitoredTask.State.ABORTED));
break;
}
}
} finally {
HRegion.closeHRegion(region);
}
}
示例2: setUp
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Per-test setup: creates a fresh MVCC controller and memstore, then
 * configures SchemaMetrics to omit the table name from metric keys.
 *
 * @throws Exception if superclass setup fails
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  mvcc = new MultiVersionConsistencyControl();
  memstore = new MemStore();
  // Metric keys in this suite are not qualified with the table name.
  SchemaMetrics.setUseTableNameInTest(false);
}
示例3: setUp
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Per-test setup: enables table-name-qualified metric keys and builds the
 * shared test table descriptor with a single cached, multi-version family.
 *
 * @throws Exception if superclass setup fails
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  SchemaMetrics.setUseTableNameInTest(true);
  TEST_UTIL = new HBaseTestingUtility();
  // Each setter returns the descriptor itself; plain statement calls are
  // equivalent to the fluent chain.
  HColumnDescriptor family = new HColumnDescriptor(FAMILY);
  family.setMaxVersions(10);
  family.setBlockCacheEnabled(true);
  family.setBlocksize(BLOCK_SIZE);
  family.setCompressionType(Compression.Algorithm.NONE);
  TESTTABLEDESC = new HTableDescriptor(TABLE);
  TESTTABLEDESC.addFamily(family);
}
示例4: setUp
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Per-test setup: enables table-name-qualified metric keys, records a
 * baseline metrics snapshot, and starts the mini cluster.
 *
 * @throws Exception if the mini cluster fails to start
 */
@Before
public void setUp() throws Exception {
// Enable table-name-qualified metric keys before the baseline snapshot is taken.
SchemaMetrics.setUseTableNameInTest(true);
startingMetrics = SchemaMetrics.getMetricsSnapshot();
TEST_UTIL.startMiniCluster();
}
示例5: setUp
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Per-test setup: delegates to the superclass, then configures SchemaMetrics
 * to omit the table name from metric keys.
 *
 * @throws Exception if superclass setup fails
 */
@Override // added: this overrides the superclass setUp() it delegates to,
          // matching the annotated sibling setUp() implementations
public void setUp() throws Exception {
  super.setUp();
  SchemaMetrics.setUseTableNameInTest(false);
}
示例6: setUp
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Per-test setup: delegates to the superclass, enables table-name-qualified
 * metric keys, and creates a fresh testing utility.
 *
 * @throws Exception if superclass setup fails
 */
@Override
public void setUp() throws Exception {
super.setUp();
// Metric keys in this suite include the table name.
SchemaMetrics.setUseTableNameInTest(true);
TEST_UTIL = new HBaseTestingUtility();
}
示例7: testCacheBlocks
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Verifies per-column-family block-cache behavior via SchemaMetrics deltas:
 * bloom (and, for HFile v2, index) block metrics must increase after reads,
 * while data-block metrics must stay absent when caching is disabled on the CF.
 *
 * @throws IOException if region creation or reads fail
 */
@Test
public void testCacheBlocks() throws IOException {
// Set index block size to be the same as normal block size.
TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY,
BLOCK_SIZE);
// Metric keys in this test are not qualified with the table name.
SchemaMetrics.setUseTableNameInTest(false);
HColumnDescriptor hcd =
new HColumnDescriptor(Bytes.toBytes(CF))
.setMaxVersions(MAX_VERSIONS)
.setCompressionType(COMPRESSION_ALGORITHM)
.setBloomFilterType(BLOOM_TYPE);
hcd.setBlocksize(BLOCK_SIZE);
// cfCacheEnabled is the parameter under test: data-block caching on or off.
hcd.setBlockCacheEnabled(cfCacheEnabled);
HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd);
writeTestData(region);
// Snapshot metrics before reading so the delta reflects only the Gets below.
Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
for (int i = 0; i < NUM_ROWS; ++i) {
Get get = new Get(Bytes.toBytes("row" + i));
region.get(get, null);
}
SchemaMetrics.validateMetricChanges(metricsBefore);
Map<String, Long> metricsAfter = SchemaMetrics.getMetricsSnapshot();
Map<String, Long> metricsDelta = SchemaMetrics.diffMetrics(metricsBefore,
metricsAfter);
SchemaMetrics metrics = SchemaMetrics.getInstance(TABLE, CF);
List<BlockCategory> importantBlockCategories =
new ArrayList<BlockCategory>();
importantBlockCategories.add(BlockCategory.BLOOM);
if (hfileVersion == 2) {
// We only have index blocks for HFile v2.
importantBlockCategories.add(BlockCategory.INDEX);
}
// These categories must show a positive delta regardless of CF cache setting.
for (BlockCategory category : importantBlockCategories) {
String hitsMetricName = getMetricName(metrics, category);
assertTrue("Metric " + hitsMetricName + " was not incremented",
metricsDelta.containsKey(hitsMetricName));
long hits = metricsDelta.get(hitsMetricName);
assertTrue("Invalid value of " + hitsMetricName + ": " + hits, hits > 0);
}
if (!cfCacheEnabled) {
// Caching is turned off for the CF, so make sure we are not caching data
// blocks.
String dataHitMetricName = getMetricName(metrics, BlockCategory.DATA);
assertFalse("Nonzero value for metric " + dataHitMetricName,
metricsDelta.containsKey(dataHitMetricName));
}
}
示例8: TestLruBlockCache
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; //导入方法依赖的package包/类
/**
 * Parameterized test constructor: configures whether SchemaMetrics metric
 * keys include the table name for this test run.
 *
 * @param useTableName true to qualify metric keys with the table name
 */
public TestLruBlockCache(boolean useTableName) {
SchemaMetrics.setUseTableNameInTest(useTableName);
}