

Java HRegion.flush Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.HRegion.flush, gathered from open-source projects. If you are wondering what HRegion.flush does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.HRegion.


Seven code examples of HRegion.flush are shown below, ordered by popularity.
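Before the examples, here is a minimal sketch of the pattern they all share: create a region, write some data, then call flush to persist the memstore to a store file. This is an illustrative sketch against the same 1.x-era API the snippets below use; conf, TEST_UTIL, TABLE, and FAMILY_BYTES are assumed test fixtures, not part of any quoted example.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;

HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(new HColumnDescriptor(FAMILY_BYTES));
HRegion region = HRegion.createHRegion(new HRegionInfo(TABLE),
    TEST_UTIL.getDataTestDir(), conf, htd);

Put put = new Put(Bytes.toBytes("row1"));
put.add(FAMILY_BYTES, Bytes.toBytes("q"), Bytes.toBytes("v"));
region.put(put);

// Persist the memstore to an HFile; 'true' forces a flush of all stores.
region.flush(true);
region.close();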

Example 1: loadRegion

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Load region with rows from 'aaa' to 'zzz'.
 * @param r Region
 * @param f Family
 * @param flush if true, flush the region after each first-letter batch of rows
 * @return Count of rows loaded.
 * @throws IOException
 */
public int loadRegion(final HRegion r, final byte[] f, final boolean flush)
throws IOException {
  byte[] k = new byte[3];
  int rowCount = 0;
  for (byte b1 = 'a'; b1 <= 'z'; b1++) {
    for (byte b2 = 'a'; b2 <= 'z'; b2++) {
      for (byte b3 = 'a'; b3 <= 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        // Skip the WAL; this is bulk test data that need not be durable.
        put.setDurability(Durability.SKIP_WAL);
        put.add(f, null, k);
        // Retry with exponential backoff (capped at 1s) if the region is
        // too busy to accept the put.
        int preRowCount = rowCount;
        int pause = 10;
        int maxPause = 1000;
        while (rowCount == preRowCount) {
          try {
            r.put(put);
            rowCount++;
          } catch (RegionTooBusyException e) {
            pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
            Threads.sleep(pause);
          }
        }
      }
    }
    if (flush) {
      // Flush the memstore to disk after finishing each first letter.
      r.flush(true);
    }
  }
  return rowCount;
}
 
Developer: fengchen8086, Project: ditb, Lines: 45, Source: HBaseTestingUtility.java
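A hypothetical follow-up call (util, region, and FAMILY are assumed from the surrounding test fixture, not shown above): with flush=true the helper flushes 26 times, once per first letter, and always returns 26 × 26 × 26 rows.

// Hypothetical usage; 'util', 'region' and 'FAMILY' come from the test setup.
int rows = util.loadRegion(region, FAMILY, true); // flush after each first-letter batch
assertEquals(26 * 26 * 26, rows); // 'aaa' through 'zzz' = 17,576 rows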

Example 2: createRegion

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
private HRegion createRegion(final HTableDescriptor desc,
    byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir)
throws IOException {
  HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
  HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration(), desc);
  LOG.info("Created region " + region.getRegionInfo().getRegionNameAsString());
  for(int i = firstRow; i < firstRow + nrows; i++) {
    Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
    put.setDurability(Durability.SKIP_WAL);
    put.add(COLUMN_NAME, null, VALUE);
    region.put(put);
    if (i % 10000 == 0) {
      LOG.info("Flushing write #" + i);
      // Roll the memstore into a new store file every 10,000 writes.
      region.flush(true);
    }
  }
  HRegion.closeHRegion(region);
  return region;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestMergeTable.java
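A hypothetical invocation of this helper (desc and rootdir are assumed from the test setup; HConstants.EMPTY_BYTE_ARRAY stands in for open-ended keys): 20,000 rows trigger an intermediate flush at every 10,000th write. Note that the helper closes the region before returning it.

// Hypothetical usage: one region spanning the whole key space, 20,000 rows.
HRegion r = createRegion(desc, HConstants.EMPTY_BYTE_ARRAY,
    HConstants.EMPTY_BYTE_ARRAY, 0, 20000, rootdir);
// 'r' is already closed here; reopen it (e.g. HRegion.openHRegion) to read.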

Example 3: testScannerSelection

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.hstore.compactionThreshold", 10000);
  HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true)
      .setBloomFilterType(bloomType);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(TABLE);
  HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), conf, htd);

  for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flush(true);
  }

  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  CacheConfig.blockCacheDisabled = false;
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  // The range ["aaa","aaz") matches none of the "row..." keys, so the scan
  // returns nothing; the point is which store files it touches (and caches).
  while (scanner.next(results)) {
  }
  scanner.close();
  assertEquals(0, results.size());
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  assertEquals(expectedCount, accessedFiles.size());
  region.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TestScannerSelectionUsingKeyRange.java

Example 4: testRegionNormalizationMergeOnCluster

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
@Test(timeout = 60000)
@SuppressWarnings("deprecation")
public void testRegionNormalizationMergeOnCluster() throws Exception {
  final TableName TABLENAME =
    TableName.valueOf("testRegionNormalizationMergeOnCluster");
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  HMaster m = cluster.getMaster();

  // create 5 regions with sizes to trigger merge of small regions
  try (HTable ht = TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILYNAME, 5)) {
    // Need to get sorted list of regions here
    List<HRegion> generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(TABLENAME);
    Collections.sort(generatedRegions, new Comparator<HRegion>() {
      @Override
      public int compare(HRegion o1, HRegion o2) {
        return o1.getRegionInfo().compareTo(o2.getRegionInfo());
      }
    });

    // Load the five regions with 1, 1, 3, 3 and 5 units of data so the two
    // smallest become merge candidates, flushing each so sizes get reported.
    int[] dataUnits = {1, 1, 3, 3, 5};
    for (int i = 0; i < dataUnits.length; i++) {
      HRegion region = generatedRegions.get(i);
      generateTestData(region, dataUnits[i]);
      region.flush(true);
    }
  }

  HTableDescriptor htd = admin.getTableDescriptor(TABLENAME);
  htd.setNormalizationEnabled(true);
  admin.modifyTable(TABLENAME, htd);

  admin.flush(TABLENAME);

  assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), TABLENAME));

  // Now trigger normalization; it should merge the two smallest regions.
  Thread.sleep(5000); // give region load reports time to update
  m.normalizeRegions();

  while (MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), TABLENAME) > 4) {
    LOG.info("Waiting for normalization merge to complete");
    Thread.sleep(100);
  }

  assertEquals(4, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), TABLENAME));

  admin.disableTable(TABLENAME);
  admin.deleteTable(TABLENAME);
}
 
Developer: fengchen8086, Project: ditb, Lines: 63, Source: TestSimpleRegionNormalizerOnCluster.java

Example 5: testFlushSequenceIdIsGreaterThanAllEditsInHFile

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Test that a flush is assigned a sequence id beyond the last edit appended. We do this
 * by slowing appends in the background ring-buffer thread while calling flush in the
 * foreground. The addition of the sync over HRegion in flush should fix an issue where
 * flush returned before all of its appends had made it out to the WAL (HBASE-11109).
 * @throws IOException
 * @see HBASE-11109
 */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
  String testName = "testFlushSequenceIdIsGreaterThanAllEditsInHFile";
  final TableName tableName = TableName.valueOf(testName);
  final HRegionInfo hri = new HRegionInfo(tableName);
  final byte[] rowName = tableName.getName();
  final HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("f"));
  HRegion r = HRegion.createHRegion(hri, TEST_UTIL.getDefaultRootDirPath(),
    TEST_UTIL.getConfiguration(), htd);
  HRegion.closeHRegion(r);
  final int countPerFamily = 10;
  final MutableBoolean goslow = new MutableBoolean(false);
  // subclass and doctor a method.
  FSHLog wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDefaultRootDirPath(),
      testName, conf) {
    @Override
    void atHeadOfRingBufferEventHandlerAppend() {
      if (goslow.isTrue()) {
        LOG.debug("Sleeping 100ms before appending");
        Threads.sleep(100);
      }
      super.atHeadOfRingBufferEventHandlerAppend();
    }
  };
  HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(),
    TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
  EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
  try {
    List<Put> puts = null;
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      puts =
        TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
    }

    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());

    // Construct a WALEdit and add it a few times to the WAL.
    WALEdit edits = new WALEdit();
    for (Put p: puts) {
      CellScanner cs = p.cellScanner();
      while (cs.advance()) {
        edits.add(cs.current());
      }
    }
    // Add any old cluster id.
    List<UUID> clusterIds = new ArrayList<UUID>();
    clusterIds.add(UUID.randomUUID());
    // Now make appends run slow.
    goslow.setValue(true);
    for (int i = 0; i < countPerFamily; i++) {
      final HRegionInfo info = region.getRegionInfo();
      final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC());
      wal.append(htd, info, logkey, edits, true);
    }
    region.flush(true);
    // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
    long currentSequenceId = region.getSequenceId();
    // Now release the appends
    goslow.setValue(false);
    synchronized (goslow) {
      goslow.notifyAll();
    }
    assertTrue(currentSequenceId >= region.getSequenceId());
  } finally {
    region.close(true);
    wal.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 82, Source: TestFSHLog.java

Example 6: testFilterListWithPrefixFilter

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
@Test
public void testFilterListWithPrefixFilter() throws IOException {
  byte[] family = Bytes.toBytes("f1");
  byte[] qualifier = Bytes.toBytes("q1");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestFilter"));
  htd.addFamily(new HColumnDescriptor(family));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HRegion testRegion = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);

  for(int i=0; i<5; i++) {
    Put p = new Put(Bytes.toBytes((char)('a'+i) + "row"));
    p.setDurability(Durability.SKIP_WAL);
    p.add(family, qualifier, Bytes.toBytes(String.valueOf(111+i)));
    testRegion.put(p);
  }
  testRegion.flush(true);

  // rows starting with "b"
  PrefixFilter pf = new PrefixFilter(new byte[] {'b'});
  // rows with value of column 'q1' set to '113'
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(
      family, qualifier, CompareOp.EQUAL, Bytes.toBytes("113"));
  // combine these two with OR in a FilterList
  FilterList filterList = new FilterList(Operator.MUST_PASS_ONE, pf, scvf);

  Scan s1 = new Scan();
  s1.setFilter(filterList);
  InternalScanner scanner = testRegion.getScanner(s1);
  List<Cell> results = new ArrayList<Cell>();
  int resultCount = 0;
  while (scanner.next(results)) {
    resultCount++;
    byte[] row = CellUtil.cloneRow(results.get(0));
    LOG.debug("Found row: " + Bytes.toStringBinary(row));
    assertTrue(Bytes.equals(row, Bytes.toBytes("brow"))
        || Bytes.equals(row, Bytes.toBytes("crow")));
    results.clear();
  }
  assertEquals(2, resultCount);
  scanner.close();

  WAL wal = ((HRegion)testRegion).getWAL();
  ((HRegion)testRegion).close();
  wal.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 47, Source: TestFilter.java
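For comparison, a hypothetical client-side version of the same MUST_PASS_ONE filter combination, run through the normal Table API rather than directly against an HRegion (conn is an assumed existing Connection; family and qualifier as in the test above):

// Hypothetical client-side usage of the same OR FilterList.
FilterList fl = new FilterList(Operator.MUST_PASS_ONE,
    new PrefixFilter(Bytes.toBytes("b")),
    new SingleColumnValueFilter(family, qualifier, CompareOp.EQUAL, Bytes.toBytes("113")));
Scan scan = new Scan();
scan.setFilter(fl);
try (Table table = conn.getTable(TableName.valueOf("TestFilter"));
     ResultScanner rs = table.getScanner(scan)) {
  for (Result result : rs) {
    // Expect "brow" (prefix match) and "crow" (column value 113).
    System.out.println(Bytes.toStringBinary(result.getRow()));
  }
}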

Example 7: testScannerSelection

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
@Test
public void testScannerSelection() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.store.delete.expired.storefile", false);
  HColumnDescriptor hcd =
    new HColumnDescriptor(FAMILY_BYTES)
        .setMaxVersions(Integer.MAX_VALUE)
        .setTimeToLive(TTL_SECONDS);
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(TABLE);
  HRegion region =
      HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(info.getEncodedName()),
          conf, htd);

  long ts = EnvironmentEdgeManager.currentTime();
  long version = 0; // make sure each new batch of Puts gets a fresh timestamp
  for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
    if (iFile == NUM_EXPIRED_FILES) {
      Threads.sleepWithoutInterrupt(TTL_MS);
      version += TTL_MS;
    }

    for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
      Put put = new Put(Bytes.toBytes("row" + iRow));
      for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
        put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
            ts + version, Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
      }
      region.put(put);
    }
    region.flush(true);
    version++;
  }

  Scan scan = new Scan();
  scan.setMaxVersions(Integer.MAX_VALUE);
  CacheConfig cacheConf = new CacheConfig(conf);
  LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
  cache.clearCache();
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  final int expectedKVsPerRow = numFreshFiles * NUM_COLS_PER_ROW;
  int numReturnedRows = 0;
  LOG.info("Scanning the entire table");
  while (scanner.next(results) || results.size() > 0) {
    assertEquals(expectedKVsPerRow, results.size());
    ++numReturnedRows;
    results.clear();
  }
  assertEquals(NUM_ROWS, numReturnedRows);
  Set<String> accessedFiles = cache.getCachedFileNamesForTest();
  LOG.debug("Files accessed during scan: " + accessedFiles);

  // Exercise both compaction codepaths.
  if (explicitCompaction) {
    HStore store = (HStore)region.getStore(FAMILY_BYTES);
    store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles);
  } else {
    region.compact(false);
  }

  region.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 65, Source: TestScannerSelectionUsingTTL.java


Note: The org.apache.hadoop.hbase.regionserver.HRegion.flush examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Do not republish without permission.