

Java Region.flush Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.Region.flush. If you are wondering what Region.flush does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of org.apache.hadoop.hbase.regionserver.Region, the class this method belongs to.


The following presents 11 code examples of the Region.flush method, sorted by popularity by default.
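Before diving into the examples, here is the core pattern nearly all of them share, as a minimal sketch distilled from the snippets below. It assumes an HBase 1.x environment, where Region.flush(boolean) and HRegionServer.getOnlineRegionsLocalContext() exist as used in the examples; the FlushHelper class and flushAll method names are illustrative, not part of HBase:

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;

// Illustrative helper (not part of HBase): flush every region on a region server.
public class FlushHelper {
  static void flushAll(HRegionServer server) throws IOException {
    for (Region r : server.getOnlineRegionsLocalContext()) {
      // flush(true) forces the memstore contents to be written out
      // as new HFiles, regardless of how much data is buffered.
      r.flush(true);
    }
  }
}

Flushing before rolling the WAL or compacting, as the tests below do, makes the on-disk state deterministic for assertions.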

Example 1: testWALRollWriting

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
@Test (timeout=300000)
public void testWALRollWriting() throws Exception {
  setUpforLogRolling();
  String className = this.getClass().getName();
  StringBuilder v = new StringBuilder(className);
  while (v.length() < 1000) {
    v.append(className);
  }
  byte[] value = Bytes.toBytes(v.toString());
  HRegionServer regionServer = startAndWriteData(TableName.valueOf("TestLogRolling"), value);
  LOG.info("after writing there are "
      + DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files");

  // flush all regions
  for (Region r : regionServer.getOnlineRegionsLocalContext()) {
    r.flush(true);
  }
  admin.rollWALWriter(regionServer.getServerName());
  int count = DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null));
  LOG.info("after flushing all regions and rolling logs there are " +
      count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: TestAdmin2.java

Example 2: setUpBeforeClass

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  cluster = TEST_UTIL.startMiniCluster(1, ServerNum);
  table = TEST_UTIL.createTable(tableName, FAMILY, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  TEST_UTIL.waitTableAvailable(tableName, 1000);
  TEST_UTIL.loadTable(table, FAMILY);

  for (int i = 0; i < ServerNum; i++) {
    HRegionServer server = cluster.getRegionServer(i);
    for (Region region : server.getOnlineRegions(tableName)) {
      region.flush(true);
    }
  }

  finder.setConf(TEST_UTIL.getConfiguration());
  finder.setServices(cluster.getMaster());
  finder.setClusterStatus(cluster.getMaster().getClusterStatus());
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestRegionLocationFinder.java

Example 3: testRegionObserverFlushTimeStacking

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };

  Configuration conf = HBaseConfiguration.create();
  Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
        + r, r.listCells());
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: TestRegionObserverScannerOpenHook.java

Example 4: testLogRolling

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
/**
 * Tests that logs are deleted
 * @throws IOException
 * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException
 */
@Test
public void testLogRolling() throws Exception {
  this.tableName = getName();
  // TODO: Why does writing this data take forever?
  startAndWriteData();
  final WAL log = server.getWAL(null);
  LOG.info("after writing there are " + DefaultWALProvider.getNumRolledLogFiles(log) +
      " log files");

  // flush all regions
  for (Region r : server.getOnlineRegionsLocalContext()) {
    r.flush(true);
  }

  // Now roll the log
  log.rollWriter();

  int count = DefaultWALProvider.getNumRolledLogFiles(log);
  LOG.info("after flushing all regions and rolling logs there are " + count + " log files");
  assertTrue(("actual count: " + count), count <= 2);
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestLogRolling.java

Example 5: flushcache

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
/**
 * Call flushCache on all regions on all participating regionservers.
 * @throws IOException
 */
public void flushcache() throws IOException {
  for (JVMClusterUtil.RegionServerThread t:
      this.hbaseCluster.getRegionServers()) {
    for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) {
      r.flush(true);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: MiniHBaseCluster.java

Example 6: createHFileInRegion

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
/**
 * Create a new hfile in the passed region
 * @param region region to operate on
 * @param columnFamily family for which to add data
 * @throws IOException
 */
private void createHFileInRegion(Region region, byte[] columnFamily) throws IOException {
  // put one row in the region
  Put p = new Put(Bytes.toBytes("row"));
  p.add(columnFamily, Bytes.toBytes("Qual"), Bytes.toBytes("v1"));
  region.put(p);
  // flush the region to make a store file
  region.flush(true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: TestZooKeeperTableArchiveClient.java

Example 7: writeTestData

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
private void writeTestData(Region region) throws IOException {
  for (int i = 0; i < NUM_ROWS; ++i) {
    Put put = new Put(Bytes.toBytes("row" + i));
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      for (long ts = 1; ts < NUM_TIMESTAMPS_PER_COL; ++ts) {
        put.add(CF_BYTES, Bytes.toBytes("col" + j), ts,
            Bytes.toBytes("value" + i + "_" + j + "_" + ts));
      }
    }
    region.put(put);
    if ((i + 1) % ROWS_PER_HFILE == 0) {
      region.flush(true);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: TestForceCacheImportantBlocks.java

Example 8: doPuts

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
private void doPuts(Region region) throws IOException {
  LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
  for (int i = 0; i < NUM_ROWS; ++i) {
    byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Put put = new Put(key);
      put.setDurability(Durability.ASYNC_WAL);
      byte[] col = Bytes.toBytes(String.valueOf(j));
      byte[] value = dataGenerator.generateRandomSizeValue(key, col);
      if (includeTags) {
        Tag[] tag = new Tag[1];
        tag[0] = new Tag((byte) 1, "Visibility");
        KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
        put.add(kv);
      } else {
        put.add(CF_BYTES, col, value);
      }
      if (VERBOSE) {
        KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);
        System.err.println(Strings.padFront(i + "", ' ', 4) + " " + kvPut);
      }
      region.put(put);
    }
    if (i % NUM_ROWS_PER_FLUSH == 0) {
      region.flush(true);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: TestEncodedSeekers.java

Example 9: testSharedData

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
@Test
public void testSharedData() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  Region region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{}, families);

  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  byte [] splitRow = ((HRegion)region).checkSplit();
  assertNotNull(splitRow);
  Region [] regions = split(region, splitRow);
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class, CoprocessorII.class);
  }
  Coprocessor c = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  Coprocessor c2 = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorII.class.getName());
  Object o = ((CoprocessorImpl)c).getSharedData().get("test1");
  Object o2 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertNotNull(o);
  assertNotNull(o2);
  // the two coprocessors get different sharedData maps
  assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData());
  for (int i = 1; i < regions.length; i++) {
    c = regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorImpl.class.getName());
    c2 = regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName());
    // make sure that all coprocessors of a class share the same sharedData
    assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
    assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);
  }
  // now have all Environments fail
  for (int i = 0; i < regions.length; i++) {
    try {
      byte [] r = regions[i].getRegionInfo().getStartKey();
      if (r == null || r.length <= 0) {
        // It's the start row.  Can't ask for null.  Ask for the minimal key instead.
        r = new byte [] {0};
      }
      Get g = new Get(r);
      regions[i].get(g);
      fail();
    } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
    }
    assertNull(regions[i].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName()));
  }
  c = regions[0].getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c = c2 = null;
  // perform a GC
  System.gc();
  // reopen the region
  region = reopenRegion(regions[0], CoprocessorImpl.class, CoprocessorII.class);
  c = region.getCoprocessorHost().
      findCoprocessor(CoprocessorImpl.class.getName());
  // CPimpl is unaffected, still the same reference
  assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
  c2 = region.getCoprocessorHost().
      findCoprocessor(CoprocessorII.class.getName());
  // new map and object created, hence the reference is different
  // hence the old entry was indeed removed by the GC and new one has been created
  Object o3 = ((CoprocessorII)c2).getSharedData().get("test2");
  assertFalse(o3 == o2);
}
 
Developer: fengchen8086, Project: ditb, Lines: 77, Source: TestCoprocessorInterface.java

Example 10: testCoprocessorInterface

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
@Test
public void testCoprocessorInterface() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  Region region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{CoprocessorImpl.class}, families);
  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  byte [] splitRow = ((HRegion)region).checkSplit();

  assertNotNull(splitRow);
  Region [] regions = split(region, splitRow);
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class);
  }
  HRegion.closeHRegion((HRegion)region);
  Coprocessor c = region.getCoprocessorHost().
    findCoprocessor(CoprocessorImpl.class.getName());

  // HBASE-4197
  Scan s = new Scan();
  RegionScanner scanner = regions[0].getCoprocessorHost().postScannerOpen(s, regions[0].getScanner(s));
  assertTrue(scanner instanceof CustomScanner);
  // this would throw an exception before HBASE-4197
  scanner.next(new ArrayList<Cell>());

  assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
  assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
  assertTrue(((CoprocessorImpl)c).wasOpened());
  assertTrue(((CoprocessorImpl)c).wasClosed());
  assertTrue(((CoprocessorImpl)c).wasFlushed());
  assertTrue(((CoprocessorImpl)c).wasCompacted());
  assertTrue(((CoprocessorImpl)c).wasSplit());

  for (int i = 0; i < regions.length; i++) {
    HRegion.closeHRegion((HRegion)regions[i]);
    c = region.getCoprocessorHost()
          .findCoprocessor(CoprocessorImpl.class.getName());
    assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
    assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
    assertTrue(((CoprocessorImpl)c).wasOpened());
    assertTrue(((CoprocessorImpl)c).wasClosed());
    assertTrue(((CoprocessorImpl)c).wasCompacted());
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 53, Source: TestCoprocessorInterface.java

Example 11: testNotCachingDataBlocksDuringCompactionInternals

import org.apache.hadoop.hbase.regionserver.Region; // import the package/class this method depends on
private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags)
    throws IOException, InterruptedException {
  if (useTags) {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  } else {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2);
  }
  // TODO: need to change this test if we add a cache size threshold for
  // compactions, or if we implement some other kind of intelligent logic for
  // deciding what blocks to cache-on-write on compaction.
  final String table = "CompactionCacheOnWrite";
  final String cf = "myCF";
  final byte[] cfBytes = Bytes.toBytes(cf);
  final int maxVersions = 3;
  Region region = TEST_UTIL.createTestRegion(table, 
      new HColumnDescriptor(cf)
          .setCompressionType(compress)
          .setBloomFilterType(BLOOM_TYPE)
          .setMaxVersions(maxVersions)
          .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
  );
  int rowIdx = 0;
  long ts = EnvironmentEdgeManager.currentTime();
  for (int iFile = 0; iFile < 5; ++iFile) {
    for (int iRow = 0; iRow < 500; ++iRow) {
      String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + 
          iRow;
      Put p = new Put(Bytes.toBytes(rowStr));
      ++rowIdx;
      for (int iCol = 0; iCol < 10; ++iCol) {
        String qualStr = "col" + iCol;
        String valueStr = "value_" + rowStr + "_" + qualStr;
        for (int iTS = 0; iTS < 5; ++iTS) {
          if (useTags) {
            Tag t = new Tag((byte) 1, "visibility");
            Tag[] tags = new Tag[1];
            tags[0] = t;
            KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
                HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
            p.add(kv);
          } else {
            p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr));
          }
        }
      }
      p.setDurability(Durability.ASYNC_WAL);
      region.put(p);
    }
    region.flush(true);
  }
  clearBlockCache(blockCache);
  assertEquals(0, blockCache.getBlockCount());
  region.compact(false);
  LOG.debug("compactStores() returned");

  for (CachedBlock block: blockCache) {
    assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
    assertNotEquals(BlockType.DATA, block.getBlockType());
  }
  ((HRegion)region).close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 62, Source: TestCacheOnWrite.java


Note: The org.apache.hadoop.hbase.regionserver.Region.flush method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright in the source code remains with the original authors. Consult each project's license before distributing or using the code. Do not reproduce this article without permission.