

Java Region.compact Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.Region.compact. If you are wondering what Region.compact does in practice, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.hbase.regionserver.Region.


The following presents 7 code examples of Region.compact, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
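Before working through the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the flush-then-compact pattern that most of them share. The class name RegionCompactSketch, the table name "demo_table", and the column family "cf" are placeholders chosen for illustration; the sketch assumes the HBase 1.x test helpers (HBaseTestingUtility.createTestRegion, Region.flush, Region.compact) that appear in the examples themselves.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionCompactSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // createTestRegion builds a standalone region on the local filesystem;
    // table and family names here are illustrative placeholders.
    Region region = util.createTestRegion("demo_table", new HColumnDescriptor("cf"));

    // Write and flush twice so the store ends up with more than one HFile.
    for (int i = 0; i < 2; i++) {
      Put p = new Put(Bytes.toBytes("row" + i));
      p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value" + i));
      region.put(p);
      region.flush(true); // force the memstore to disk
    }

    // false requests a minor compaction of the flushed files;
    // passing true would request a major compaction instead.
    region.compact(false);
  }
}

Passing true asks for a major compaction that rewrites all store files into one, as Example 1 does for every online region of a mini-cluster and Example 2 does to merge two HFiles; passing false requests a minor compaction, which is enough for the tests below that only need the store-file count to drop.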

Example 1: compact

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
/**
 * Call compact on all regions on all participating regionservers.
 * @throws IOException
 */
public void compact(boolean major) throws IOException {
  for (JVMClusterUtil.RegionServerThread t:
      this.hbaseCluster.getRegionServers()) {
    for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) {
      r.compact(major);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: MiniHBaseCluster.java

Example 2: loadFlushAndCompact

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
private void loadFlushAndCompact(Region region, byte[] family) throws IOException {
  // create two hfiles in the region
  createHFileInRegion(region, family);
  createHFileInRegion(region, family);

  Store s = region.getStore(family);
  int count = s.getStorefilesCount();
  assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count,
    count >= 2);

  // compact the two files into one file to get files in the archive
  LOG.debug("Compacting stores");
  region.compact(true);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 15, Source: TestZooKeeperTableArchiveClient.java

Example 3: testEncodedSeeker

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding : " + encoding + ", includeTags : "
      + includeTags + ", compressTags : " + compressTags);
  if(includeTags) {
    testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
  }
  LruBlockCache cache =
    (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
  cache.clearCache();
  // Need to disable default row bloom filter for this test to pass.
  HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
      setDataBlockEncoding(encoding).
      setBlocksize(BLOCK_SIZE).
      setBloomFilterType(BloomType.NONE).
      setCompressTags(compressTags);
  Region region = testUtil.createTestRegion(TABLE_NAME, hcd);

  //write the data, but leave some in the memstore
  doPuts(region);

  //verify correctness when memstore contains data
  doGets(region);

  //verify correctness again after compacting
  region.compact(false);
  doGets(region);

  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();

  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 39, Source: TestEncodedSeekers.java

Example 4: testSharedData

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
@Test
public void testSharedData() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  Region region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{}, families);

  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  byte [] splitRow = ((HRegion)region).checkSplit();
  assertNotNull(splitRow);
  Region [] regions = split(region, splitRow);
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class, CoprocessorII.class);
  }
  Coprocessor c = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  Coprocessor c2 = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorII.class.getName());
  Object o = ((CoprocessorImpl)c).getSharedData().get("test1");
  Object o2 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertNotNull(o);
  assertNotNull(o2);
  // the two coprocessors get different sharedDatas
  assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData());
  for (int i = 1; i < regions.length; i++) {
    c = regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorImpl.class.getName());
    c2 = regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName());
    // make sure that all coprocessors of a class have identical sharedDatas
    assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
    assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);
  }
  // now have all Environments fail
  for (int i = 0; i < regions.length; i++) {
    try {
      byte [] r = regions[i].getRegionInfo().getStartKey();
      if (r == null || r.length <= 0) {
        // It's the start row. Can't ask for null. Ask for minimal key instead.
        r = new byte [] {0};
      }
      Get g = new Get(r);
      regions[i].get(g);
      fail();
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
    }
    assertNull(regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName()));
  }
  c = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c = c2 = null;
  // perform a GC
  System.gc();
  // reopen the region
  region = reopenRegion(regions[0], CoprocessorImpl.class, CoprocessorII.class);
  c = region.getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  // CPimpl is unaffected, still the same reference
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c2 = region.getCoprocessorHost().
      findCoprocessor(CoprocessorII.class.getName());
  // new map and object created, hence the reference is different
  // hence the old entry was indeed removed by the GC and new one has been created
  Object o3 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertFalse(o3 == o2);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 77, Source: TestCoprocessorInterface.java

Example 5: testCoprocessorInterface

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
@Test
public void testCoprocessorInterface() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  Region region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{CoprocessorImpl.class}, families);
  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  byte [] splitRow = ((HRegion)region).checkSplit();

  assertNotNull(splitRow);
  Region [] regions = split(region, splitRow);
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class);
  }
  HRegion.closeHRegion((HRegion)region);
  Coprocessor c = region.getCoprocessorHost().
    findCoprocessor(CoprocessorImpl.class.getName());

  // HBASE-4197
  Scan s = new Scan();
  RegionScanner scanner = regions[0].getCoprocessorHost().postScannerOpen(s, regions[0].getScanner(s));
  assertTrue(scanner instanceof CustomScanner);
  // this would throw an exception before HBASE-4197
  scanner.next(new ArrayList<Cell>());

  assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
  assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
  assertTrue(((CoprocessorImpl)c).wasOpened());
  assertTrue(((CoprocessorImpl)c).wasClosed());
  assertTrue(((CoprocessorImpl)c).wasFlushed());
  assertTrue(((CoprocessorImpl)c).wasCompacted());
  assertTrue(((CoprocessorImpl)c).wasSplit());

  for (int i = 0; i < regions.length; i++) {
    HRegion.closeHRegion((HRegion)regions[i]);
    c = region.getCoprocessorHost()
          .findCoprocessor(CoprocessorImpl.class.getName());
    assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
    assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
    assertTrue(((CoprocessorImpl)c).wasOpened());
    assertTrue(((CoprocessorImpl)c).wasClosed());
    assertTrue(((CoprocessorImpl)c).wasCompacted());
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 53, Source: TestCoprocessorInterface.java

Example 6: testCompactionRecordDoesntBlockRolling

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
/**
 * Tests that logs are deleted when some region has a compaction
 * record in WAL and no other records. See HBASE-8597.
 */
@Test
public void testCompactionRecordDoesntBlockRolling() throws Exception {
  Table table = null;
  Table table2 = null;

  // When the hbase:meta table can be opened, the region servers are running
  Table t = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  try {
    table = createTestTable(getName());
    table2 = createTestTable(getName() + "1");

    server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
    final WAL log = server.getWAL(null);
    Region region = server.getOnlineRegions(table2.getName()).get(0);
    Store s = region.getStore(HConstants.CATALOG_FAMILY);

    // have to flush the namespace table to ensure it doesn't affect WAL tests
    admin.flush(TableName.NAMESPACE_TABLE_NAME);

    // Put some stuff into table2, to make sure we have some files to compact.
    for (int i = 1; i <= 2; ++i) {
      doPut(table2, i);
      admin.flush(table2.getName());
    }
    doPut(table2, 3); // don't flush yet, or compaction might trigger before we roll WAL
    assertEquals("Should have no WAL after initial writes", 0,
        DefaultWALProvider.getNumRolledLogFiles(log));
    assertEquals(2, s.getStorefilesCount());

    // Roll the log and compact table2, to have compaction record in the 2nd WAL.
    log.rollWriter();
    assertEquals("Should have WAL; one table is not flushed", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));
    admin.flush(table2.getName());
    region.compact(false);
    // Wait for compaction in case if flush triggered it before us.
    Assert.assertNotNull(s);
    for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) {
      Threads.sleepWithoutInterrupt(200);
    }
    assertEquals("Compaction didn't happen", 1, s.getStorefilesCount());

    // Write some value to the table so the WAL cannot be deleted until table is flushed.
    doPut(table, 0); // Now 2nd WAL will have compaction record for table2 and put for table.
    log.rollWriter(); // 1st WAL deleted, 2nd not deleted yet.
    assertEquals("Should have WAL; one table is not flushed", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));

    // Flush table to make latest WAL obsolete; write another record, and roll again.
    admin.flush(table.getName());
    doPut(table, 1);
    log.rollWriter(); // Now 2nd WAL is deleted and 3rd is added.
    assertEquals("Should have 1 WALs at the end", 1,
        DefaultWALProvider.getNumRolledLogFiles(log));
  } finally {
    if (t != null) t.close();
    if (table != null) table.close();
    if (table2 != null) table2.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 65, Source: TestLogRolling.java

Example 7: testNotCachingDataBlocksDuringCompactionInternals

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags)
    throws IOException, InterruptedException {
  if (useTags) {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  } else {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2);
  }
  // TODO: need to change this test if we add a cache size threshold for
  // compactions, or if we implement some other kind of intelligent logic for
  // deciding what blocks to cache-on-write on compaction.
  final String table = "CompactionCacheOnWrite";
  final String cf = "myCF";
  final byte[] cfBytes = Bytes.toBytes(cf);
  final int maxVersions = 3;
  Region region = TEST_UTIL.createTestRegion(table, 
      new HColumnDescriptor(cf)
          .setCompressionType(compress)
          .setBloomFilterType(BLOOM_TYPE)
          .setMaxVersions(maxVersions)
          .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
  );
  int rowIdx = 0;
  long ts = EnvironmentEdgeManager.currentTime();
  for (int iFile = 0; iFile < 5; ++iFile) {
    for (int iRow = 0; iRow < 500; ++iRow) {
      String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + iRow;
      Put p = new Put(Bytes.toBytes(rowStr));
      ++rowIdx;
      for (int iCol = 0; iCol < 10; ++iCol) {
        String qualStr = "col" + iCol;
        String valueStr = "value_" + rowStr + "_" + qualStr;
        for (int iTS = 0; iTS < 5; ++iTS) {
          if (useTags) {
            Tag t = new Tag((byte) 1, "visibility");
            Tag[] tags = new Tag[1];
            tags[0] = t;
            KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
                HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
            p.add(kv);
          } else {
            p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr));
          }
        }
      }
      p.setDurability(Durability.ASYNC_WAL);
      region.put(p);
    }
    region.flush(true);
  }
  clearBlockCache(blockCache);
  assertEquals(0, blockCache.getBlockCount());
  region.compact(false);
  LOG.debug("compactStores() returned");

  for (CachedBlock block: blockCache) {
    assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
    assertNotEquals(BlockType.DATA, block.getBlockType());
  }
  ((HRegion)region).close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 62, Source: TestCacheOnWrite.java


Note: The org.apache.hadoop.hbase.regionserver.Region.compact examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; the source code copyright belongs to the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.