

Java Store.getStorefilesCount Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.Store.getStorefilesCount. If you have been wondering what Store.getStorefilesCount does, how to use it, or what real code that calls it looks like, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.Store.


The following presents 10 code examples of Store.getStorefilesCount, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
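
Before the individual examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: look up a Store for a region's column family, trigger a major compaction, and poll getStorefilesCount() until the store files have been merged. It assumes the HBase 1.x-era test APIs used throughout this page (HBaseTestingUtility, HRegion, Store); the helper name, region lookup, and timeout handling are illustrative assumptions, not code from any of the projects below.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Store;

public class StorefilesCountSketch {
  // Hypothetical helper: request a major compaction, then poll
  // Store.getStorefilesCount() until the files are merged or the timeout expires.
  static void majorCompactAndWait(HBaseTestingUtility util, TableName tableName,
      byte[] family, long timeoutMs) throws Exception {
    // Take the Store of the first online region for the given family.
    HRegion region = util.getHBaseCluster().getRegions(tableName).get(0);
    Store store = region.getStore(family);

    util.getHBaseAdmin().majorCompact(tableName); // compaction runs asynchronously

    long deadline = System.currentTimeMillis() + timeoutMs;
    while (store.getStorefilesCount() > 1 && System.currentTimeMillis() < deadline) {
      Thread.sleep(100); // poll, as the examples below do
    }
  }
}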

Example 1: testPurgeExpiredFiles

import org.apache.hadoop.hbase.regionserver.Store; // import the package/class this method depends on
@Test
public void testPurgeExpiredFiles() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);

  TEST_UTIL.startMiniCluster(1);
  try {
    Store store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    TEST_UTIL.getHBaseAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() > 1) {
      Thread.sleep(100);
    }
    assertEquals(1, store.getStorefilesCount());
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestFIFOCompactionPolicy.java

Example 2: testCompactionWithoutThroughputLimit

import org.apache.hadoop.hbase.regionserver.Store; // import the package/class this method depends on
private long testCompactionWithoutThroughputLimit() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    NoLimitCompactionThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    Store store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getHBaseAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    return System.currentTimeMillis() - startTime;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestCompactionWithThroughputController.java

Example 3: countStoreFiles

import org.apache.hadoop.hbase.regionserver.Store; // import the package/class this method depends on
public int countStoreFiles() {
  int count = 0;
  for (Store store : stores.values()) {
    count += store.getStorefilesCount();
  }
  return count;
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: TestIOFencing.java
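
As a hedged usage illustration (this loop is not part of TestIOFencing; the expected count and timeout are assumptions), a counter like this is typically polled from a test method while waiting for flushes or a compaction to settle:

// Hypothetical polling loop around countStoreFiles(); called from a test
// method that declares "throws Exception". Count and timeout are illustrative.
int expected = 1;
long deadline = System.currentTimeMillis() + 30000;
while (countStoreFiles() > expected && System.currentTimeMillis() < deadline) {
  Thread.sleep(100);
}
assertEquals(expected, countStoreFiles());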

Example 4: waitForStoreFileCount

import org.apache.hadoop.hbase.regionserver.Store; // import the package/class this method depends on
private void waitForStoreFileCount(Store store, int count, int timeout)
throws InterruptedException {
  long start = System.currentTimeMillis();
  while (start + timeout > System.currentTimeMillis() &&
      store.getStorefilesCount() != count) {
    Thread.sleep(100);
  }
  System.out.println("start=" + start + ", now=" +
      System.currentTimeMillis() + ", cur=" + store.getStorefilesCount());
  assertEquals(count, store.getStorefilesCount());
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: TestFromClientSide.java
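
A hypothetical call site for this helper (the store reference, target count of 1, and 10-second timeout are assumptions, not taken from TestFromClientSide):

// Wait up to 10 seconds for compaction to leave exactly one store file.
waitForStoreFileCount(store, 1, 10000);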

Example 5: loadFlushAndCompact

import org.apache.hadoop.hbase.regionserver.Store; // import the package/class this method depends on
private void loadFlushAndCompact(Region region, byte[] family) throws IOException {
  // create two hfiles in the region
  createHFileInRegion(region, family);
  createHFileInRegion(region, family);

  Store s = region.getStore(family);
  int count = s.getStorefilesCount();
  assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count,
    count >= 2);

  // compact the two files into one file to get files in the archive
  LOG.debug("Compacting stores");
  region.compact(true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: TestZooKeeperTableArchiveClient.java

Example 6: testCompactionWithThroughputLimit

import org.apache.hadoop.hbase.regionserver.Store; // import the package/class this method depends on
private long testCompactionWithThroughputLimit() throws Exception {
  long throughputLimit = 1024L * 1024;
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND,
    throughputLimit);
  conf.setLong(
    PressureAwareCompactionThroughputController.HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_LOWER_BOUND,
    throughputLimit);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
    PressureAwareCompactionThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    Store store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long startTime = System.currentTimeMillis();
    TEST_UTIL.getHBaseAdmin().majorCompact(tableName);
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    long duration = System.currentTimeMillis() - startTime;
    double throughput = (double) store.getStorefilesSize() / duration * 1000;
    // confirm that the speed limit works properly (not too fast, and also not too slow);
    // 20% is the maximum acceptable error rate.
    assertTrue(throughput < throughputLimit * 1.2);
    assertTrue(throughput > throughputLimit * 0.8);
    return duration; // reuse the duration measured above for the throughput check
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: TestCompactionWithThroughputController.java
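
To make the assertion bounds concrete (a back-of-the-envelope estimate, not figures from the test): with throughputLimit set to 1 MiB/s and, say, 32 MiB of store files, the compaction should take roughly 32 seconds; the measured throughput of storefilesSize / duration * 1000 bytes per second must then fall between 0.8 MiB/s and 1.2 MiB/s for both assertTrue checks to pass.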

Example 7: loadFlushAndCompact

import org.apache.hadoop.hbase.regionserver.Store; // import the package/class this method depends on
private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException {
  // create two hfiles in the region
  createHFileInRegion(region, family);
  createHFileInRegion(region, family);

  Store s = region.getStore(family);
  int count = s.getStorefilesCount();
  assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count,
    count >= 2);

  // compact the two files into one file to get files in the archive
  LOG.debug("Compacting stores");
  region.compactStores(true);
}
 
Developer: grokcoder, Project: pbase, Lines: 15, Source: TestZooKeeperTableArchiveClient.java

Example 8: testCompactionRecordDoesntBlockRolling

import org.apache.hadoop.hbase.regionserver.Store; // import the package/class this method depends on
/**
 * Tests that logs are deleted when some region has a compaction
 * record in WAL and no other records. See HBASE-8597.
 */
@Test
public void testCompactionRecordDoesntBlockRolling() throws Exception {
  Table table = null;
  Table table2 = null;

  // When the hbase:meta table can be opened, the region servers are running
  Table t = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  try {
    table = createTestTable(getName());
    table2 = createTestTable(getName() + "1");

    server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
    final WAL log = server.getWAL(null);
    Region region = server.getOnlineRegions(table2.getName()).get(0);
    Store s = region.getStore(HConstants.CATALOG_FAMILY);

    // have to flush the namespace table to ensure it doesn't affect the WAL tests
    admin.flush(TableName.NAMESPACE_TABLE_NAME);

    // Put some stuff into table2, to make sure we have some files to compact.
    for (int i = 1; i <= 2; ++i) {
      doPut(table2, i);
      admin.flush(table2.getName());
    }
    doPut(table2, 3); // don't flush yet, or compaction might trigger before we roll WAL
    assertEquals("Should have no WAL after initial writes", 0,
        DefaultWALProvider.getNumRolledLogFiles(log));
    assertEquals(2, s.getStorefilesCount());

    // Roll the log and compact table2, to have compaction record in the 2nd WAL.
    log.rollWriter();
    assertEquals("Should have WAL; one table is not flushed", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));
    admin.flush(table2.getName());
    region.compact(false);
    // Wait for compaction in case the flush triggered it before us.
    Assert.assertNotNull(s);
    for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) {
      Threads.sleepWithoutInterrupt(200);
    }
    assertEquals("Compaction didn't happen", 1, s.getStorefilesCount());

    // Write some value to the table so the WAL cannot be deleted until table is flushed.
    doPut(table, 0); // Now 2nd WAL will have compaction record for table2 and put for table.
    log.rollWriter(); // 1st WAL deleted, 2nd not deleted yet.
    assertEquals("Should have WAL; one table is not flushed", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));

    // Flush table to make latest WAL obsolete; write another record, and roll again.
    admin.flush(table.getName());
    doPut(table, 1);
    log.rollWriter(); // Now 2nd WAL is deleted and 3rd is added.
    assertEquals("Should have 1 WALs at the end", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));
  } finally {
    if (t != null) t.close();
    if (table != null) table.close();
    if (table2 != null) table2.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 65, Source: TestLogRolling.java

Example 9: testCompactionRecordDoesntBlockRolling

import org.apache.hadoop.hbase.regionserver.Store; // import the package/class this method depends on
/**
 * Tests that logs are deleted when some region has a compaction
 * record in WAL and no other records. See HBASE-8597.
 */
@Test
public void testCompactionRecordDoesntBlockRolling() throws Exception {
  Table table = null;
  Table table2 = null;

  // When the hbase:meta table can be opened, the region servers are running
  Table t = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  try {
    table = createTestTable(getName());
    table2 = createTestTable(getName() + "1");

    server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
    final WAL log = server.getWAL(null);
    HRegion region = server.getOnlineRegions(table2.getName()).get(0);
    Store s = region.getStore(HConstants.CATALOG_FAMILY);

    // have to flush the namespace table to ensure it doesn't affect the WAL tests
    admin.flush(TableName.NAMESPACE_TABLE_NAME);

    // Put some stuff into table2, to make sure we have some files to compact.
    for (int i = 1; i <= 2; ++i) {
      doPut(table2, i);
      admin.flush(table2.getName());
    }
    doPut(table2, 3); // don't flush yet, or compaction might trigger before we roll WAL
    assertEquals("Should have no WAL after initial writes", 0,
        DefaultWALProvider.getNumRolledLogFiles(log));
    assertEquals(2, s.getStorefilesCount());

    // Roll the log and compact table2, to have compaction record in the 2nd WAL.
    log.rollWriter();
    assertEquals("Should have WAL; one table is not flushed", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));
    admin.flush(table2.getName());
    region.compactStores();
    // Wait for compaction in case the flush triggered it before us.
    Assert.assertNotNull(s);
    for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) {
      Threads.sleepWithoutInterrupt(200);
    }
    assertEquals("Compaction didn't happen", 1, s.getStorefilesCount());

    // Write some value to the table so the WAL cannot be deleted until table is flushed.
    doPut(table, 0); // Now 2nd WAL will have compaction record for table2 and put for table.
    log.rollWriter(); // 1st WAL deleted, 2nd not deleted yet.
    assertEquals("Should have WAL; one table is not flushed", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));

    // Flush table to make latest WAL obsolete; write another record, and roll again.
    admin.flush(table.getName());
    doPut(table, 1);
    log.rollWriter(); // Now 2nd WAL is deleted and 3rd is added.
    assertEquals("Should have 1 WALs at the end", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));
  } finally {
    if (t != null) t.close();
    if (table != null) table.close();
    if (table2 != null) table2.close();
  }
}
 
Developer: grokcoder, Project: pbase, Lines: 65, Source: TestLogRolling.java

Example 10: testCompactionRecordDoesntBlockRolling

import org.apache.hadoop.hbase.regionserver.Store; // import the package/class this method depends on
/**
 * Tests that logs are deleted when some region has a compaction
 * record in WAL and no other records. See HBASE-8597.
 */
@Test
public void testCompactionRecordDoesntBlockRolling() throws Exception {
  // When the hbase:meta table can be opened, the region servers are running
  new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);

  String tableName = getName();
  HTable table = createTestTable(tableName);
  String tableName2 = tableName + "1";
  HTable table2 = createTestTable(tableName2);

  server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName));
  this.log = server.getWAL();
  FSHLog fshLog = (FSHLog)log;
  HRegion region = server.getOnlineRegions(table2.getName()).get(0);
  Store s = region.getStore(HConstants.CATALOG_FAMILY);

  // have to flush the namespace table to ensure it doesn't affect the WAL tests
  admin.flush(TableName.NAMESPACE_TABLE_NAME.getName());

  // Put some stuff into table2, to make sure we have some files to compact.
  for (int i = 1; i <= 2; ++i) {
    doPut(table2, i);
    admin.flush(table2.getTableName());
  }
  doPut(table2, 3); // don't flush yet, or compaction might trigger before we roll WAL
  assertEquals("Should have no WAL after initial writes", 0, fshLog.getNumRolledLogFiles());
  assertEquals(2, s.getStorefilesCount());

  // Roll the log and compact table2, to have compaction record in the 2nd WAL.
  fshLog.rollWriter();
  assertEquals("Should have WAL; one table is not flushed", 1, fshLog.getNumRolledLogFiles());
  admin.flush(table2.getTableName());
  region.compactStores();
  // Wait for compaction in case the flush triggered it before us.
  Assert.assertNotNull(s);
  for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) {
    Threads.sleepWithoutInterrupt(200);
  }
  assertEquals("Compaction didn't happen", 1, s.getStorefilesCount());

  // Write some value to the table so the WAL cannot be deleted until table is flushed.
  doPut(table, 0); // Now 2nd WAL will have compaction record for table2 and put for table.
  fshLog.rollWriter(); // 1st WAL deleted, 2nd not deleted yet.
  assertEquals("Should have WAL; one table is not flushed", 1, fshLog.getNumRolledLogFiles());

  // Flush table to make latest WAL obsolete; write another record, and roll again.
  admin.flush(table.getTableName());
  doPut(table, 1);
  fshLog.rollWriter(); // Now 2nd WAL is deleted and 3rd is added.
  assertEquals("Should have 1 WALs at the end", 1, fshLog.getNumRolledLogFiles());

  table.close();
  table2.close();
}
 
Developer: tenggyut, Project: HIndex, Lines: 59, Source: TestLogRolling.java


Note: The org.apache.hadoop.hbase.regionserver.Store.getStorefilesCount method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many programmers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.