本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.HStore.getStorefilesSize方法的典型用法代码示例。如果您正苦于以下问题:Java HStore.getStorefilesSize方法的具体用法?Java HStore.getStorefilesSize怎么用?Java HStore.getStorefilesSize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.regionserver.HStore
的用法示例。
在下文中一共展示了HStore.getStorefilesSize方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: generateAndFlushData
import org.apache.hadoop.hbase.regionserver.HStore; //导入方法依赖的package包/类
/**
 * Writes {@code NUM_FLUSHES * NUM_PUTS} Puts of random data to the table, flushing after
 * each batch, and measures the cumulative flush time.
 *
 * @param table the table to write to (column family/qualifier come from the test fixture)
 * @return {@link Pair} of (flush throughput in bytes/second, total flush duration in nanos)
 * @throws IOException if a put or flush fails
 */
private Pair<Double, Long> generateAndFlushData(Table table) throws IOException {
  // Internally, throughput is controlled after every cell write, so keep value size less for
  // better control.
  final int NUM_FLUSHES = 3, NUM_PUTS = 50, VALUE_SIZE = 200 * 1024;
  Random rand = new Random();
  long duration = 0;
  for (int i = 0; i < NUM_FLUSHES; i++) {
    // Write about 10M (10 times of throughput rate) per iteration.
    for (int j = 0; j < NUM_PUTS; j++) {
      byte[] value = new byte[VALUE_SIZE];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    // Only the flush itself is timed, not the puts.
    long startTime = System.nanoTime();
    hbtu.getAdmin().flush(tableName);
    duration += System.nanoTime() - startTime;
  }
  HStore store = getStoreWithName(tableName);
  assertEquals(NUM_FLUSHES, store.getStorefilesCount());
  // BUG FIX: TimeUnit.NANOSECONDS.toSeconds() truncates to whole seconds — if the total
  // flush time was under one second it returned 0, making throughput Infinity, and above
  // one second it still skewed the measurement. Divide by fractional seconds instead.
  double throughput = store.getStorefilesSize() / (duration / 1e9);
  return new Pair<>(throughput, duration);
}
示例2: testCompactionWithThroughputLimit
import org.apache.hadoop.hbase.regionserver.HStore; //导入方法依赖的package包/类
/**
 * Runs a major compaction with both the upper and lower throughput bounds pinned to
 * 1 MB/s and asserts the observed compaction throughput lands within 20% of that limit.
 *
 * @return wall-clock duration of the compaction, in milliseconds
 * @throws Exception on any cluster, compaction, or assertion failure
 */
private long testCompactionWithThroughputLimit() throws Exception {
  long throughputLimit = 1024L * 1024; // 1 MB/s
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  // Raise the min/max selection and blocking thresholds so background compactions stay
  // out of the way and only the explicit major compaction below runs.
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  // Pin the higher and lower bounds to the same value so the pressure-aware controller
  // enforces a fixed rate regardless of compaction pressure.
  conf.setLong(
      PressureAwareCompactionThroughputController
          .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
      throughputLimit);
  conf.setLong(
      PressureAwareCompactionThroughputController
          .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
      throughputLimit);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
      PressureAwareCompactionThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getAdmin().majorCompact(tableName);
    // BUG FIX: the original poll loop had no timeout, so a stuck compaction hung the
    // whole suite forever. Bound the wait and fail fast with a clear message instead.
    long deadlineMillis = startTime + 10L * 60 * 1000; // 10-minute safety timeout
    while (store.getStorefilesCount() != 1) {
      assertTrue("Major compaction did not complete within 10 minutes",
          System.currentTimeMillis() < deadlineMillis);
      Thread.sleep(20);
    }
    long duration = System.currentTimeMillis() - startTime;
    // Bytes per second: resulting storefile size / elapsed millis * 1000.
    double throughput = (double) store.getStorefilesSize() / duration * 1000;
    // Confirm that the speed limit works properly (not too fast, and also not too slow).
    // 20% is the max acceptable error rate.
    assertTrue(throughput < throughputLimit * 1.2);
    assertTrue(throughput > throughputLimit * 0.8);
    return System.currentTimeMillis() - startTime;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}