

Java HStore.getStorefilesCount Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.HStore.getStorefilesCount. If you are wondering what HStore.getStorefilesCount does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of org.apache.hadoop.hbase.regionserver.HStore, the class that declares this method.


The sections below present 6 code examples of HStore.getStorefilesCount, all taken from the Apache HBase test suite and listed roughly by popularity.
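Most of the examples share one pattern: obtain an HStore for a column family and poll getStorefilesCount() until flushes or compactions bring the number of store files to an expected value. Below is a minimal, hedged sketch of that pattern; the region, family, expected count, and timeout arguments are placeholders for illustration and are not taken from any of the examples.

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;

// Minimal sketch (assumed context: a test that already holds an HRegion).
// Polls the store-file count of one column family until it reaches the expected
// value or the timeout expires; flushes and compactions update the count asynchronously.
private static void waitForStorefiles(HRegion region, byte[] family, int expected, long timeoutMs)
    throws InterruptedException {
  HStore store = region.getStore(family);        // store backing the given column family
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (store.getStorefilesCount() != expected && System.currentTimeMillis() < deadline) {
    Thread.sleep(50);                            // re-check after a short pause
  }
}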

Example 1: testCompactionWithoutThroughputLimit

import org.apache.hadoop.hbase.regionserver.HStore; // import the class that provides the method
private long testCompactionWithoutThroughputLimit() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    NoLimitThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      // wait for the major compaction to merge all ten store files into one
      Thread.sleep(20);
    }
    return System.currentTimeMillis() - startTime;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer: apache | Project: hbase | Lines: 23 | Source: TestCompactionWithThroughputController.java

Example 2: countStoreFiles

import org.apache.hadoop.hbase.regionserver.HStore; // import the class that provides the method
public int countStoreFiles() {
  int count = 0;
  for (HStore store : stores.values()) {
    count += store.getStorefilesCount();
  }
  return count;
}
 
Developer: apache | Project: hbase | Lines: 8 | Source: TestIOFencing.java

Example 3: waitForStoreFileCount

import org.apache.hadoop.hbase.regionserver.HStore; // import the class that provides the method
private void waitForStoreFileCount(HStore store, int count, int timeout)
    throws InterruptedException {
  long start = System.currentTimeMillis();
  while (start + timeout > System.currentTimeMillis() && store.getStorefilesCount() != count) {
    Thread.sleep(100);
  }
  System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur=" +
      store.getStorefilesCount());
  assertEquals(count, store.getStorefilesCount());
}
 
Developer: apache | Project: hbase | Lines: 11 | Source: TestFromClientSide.java
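A hedged sketch of how such a helper is typically invoked in a test; the admin, tableName, and store fixtures are assumptions for illustration, not taken from the example above:

// Assumed context: a test holding an Admin handle, the TableName under test, and the HStore.
admin.flush(tableName);                  // force the memstore to be written out as a store file
waitForStoreFileCount(store, 1, 10000);  // wait up to 10 s for the store to settle at one file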

Example 4: loadFlushAndCompact

import org.apache.hadoop.hbase.regionserver.HStore; // import the class that provides the method
private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException {
  // create two hfiles in the region
  createHFileInRegion(region, family);
  createHFileInRegion(region, family);

  HStore s = region.getStore(family);
  int count = s.getStorefilesCount();
  assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count,
    count >= 2);

  // compact the two files into one file to get files in the archive
  LOG.debug("Compacting stores");
  region.compact(true);
}
 
Developer: apache | Project: hbase | Lines: 15 | Source: TestZooKeeperTableArchiveClient.java

Example 5: testCompactionWithThroughputLimit

import org.apache.hadoop.hbase.regionserver.HStore; // import the class that provides the method
private long testCompactionWithThroughputLimit() throws Exception {
  long throughputLimit = 1024L * 1024;
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.setLong(
    PressureAwareCompactionThroughputController
      .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
    throughputLimit);
  conf.setLong(
    PressureAwareCompactionThroughputController
      .HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
    throughputLimit);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    PressureAwareCompactionThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    long duration = System.currentTimeMillis() - startTime;
    double throughput = (double) store.getStorefilesSize() / duration * 1000;
    // confirm that the throughput limit works properly (not too fast, but also not too slow);
    // 20% is the maximum acceptable error.
    assertTrue(throughput < throughputLimit * 1.2);
    assertTrue(throughput > throughputLimit * 0.8);
    return System.currentTimeMillis() - startTime;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer: apache | Project: hbase | Lines: 38 | Source: TestCompactionWithThroughputController.java

Example 6: insertData

import org.apache.hadoop.hbase.regionserver.HStore; // import the class that provides the method
private List<Cell> insertData(byte[] row1, byte[] qf1, byte[] qf2, byte[] fam1, long ts1,
    long ts2, long ts3, boolean withVal) throws IOException {
  // Putting data in Region
  Put put = null;
  KeyValue kv13 = null;
  KeyValue kv12 = null;
  KeyValue kv11 = null;

  KeyValue kv23 = null;
  KeyValue kv22 = null;
  KeyValue kv21 = null;
  if (!withVal) {
    kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, null);
    kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, null);
    kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, null);

    kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, null);
    kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, null);
    kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, null);
  } else {
    kv13 = new KeyValue(row1, fam1, qf1, ts3, KeyValue.Type.Put, val);
    kv12 = new KeyValue(row1, fam1, qf1, ts2, KeyValue.Type.Put, val);
    kv11 = new KeyValue(row1, fam1, qf1, ts1, KeyValue.Type.Put, val);

    kv23 = new KeyValue(row1, fam1, qf2, ts3, KeyValue.Type.Put, val);
    kv22 = new KeyValue(row1, fam1, qf2, ts2, KeyValue.Type.Put, val);
    kv21 = new KeyValue(row1, fam1, qf2, ts1, KeyValue.Type.Put, val);
  }

  put = new Put(row1);
  put.add(kv13);
  put.add(kv12);
  put.add(kv11);
  put.add(kv23);
  put.add(kv22);
  put.add(kv21);
  region.put(put);
  region.flush(true);
  HStore store = region.getStore(fam1);
  while (store.getStorefilesCount() <= 0) {
    try {
      Thread.sleep(20);
    } catch (InterruptedException e) {
      // ignore and keep polling until the flush produces a store file
    }
  }

  // Expected result: the newer versions of each qualifier; the oldest (ts1) cells are excluded.
  List<Cell> expected = new ArrayList<>();
  expected.add(kv13);
  expected.add(kv12);
  expected.add(kv23);
  expected.add(kv22);
  return expected;
}
 
Developer: apache | Project: hbase | Lines: 55 | Source: TestScannerFromBucketCache.java


Note: the org.apache.hadoop.hbase.regionserver.HStore.getStorefilesCount examples in this article were compiled from open-source code hosted on platforms such as GitHub. The snippets remain the copyright of their original authors; any use or redistribution should follow the corresponding project's license. Please do not republish without permission.