

Java HRegion.closeHRegion Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.HRegion.closeHRegion. If you are wondering what HRegion.closeHRegion does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.regionserver.HRegion.


The sections below show 15 code examples of the HRegion.closeHRegion method, sorted by popularity by default.
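
Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: create or open an HRegion, work with it, then hand it to the static HRegion.closeHRegion helper so the region is flushed and its log/WAL resources are released (see the comments in Examples 2 and 3 below). The sketch assumes the same pre-2.0 HBase API used throughout this page; the class name, table name, and column family are hypothetical.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegion;

public class CloseHRegionSketch {

  // Creates a throwaway region under rootDir and then closes it.
  // Table name "demo_table" and family "f" are placeholders.
  static void createAndClose(Configuration conf, Path rootDir) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo hri = new HRegionInfo(htd.getTableName());

    HRegion region = HRegion.createHRegion(hri, rootDir, conf, htd);
    try {
      // ... put/get/flush against the region here ...
    } finally {
      // As the comments in Examples 2 and 3 note, closing flushes the region
      // to disk and shuts down its log resources; the static helper is the
      // usual way to do this in cleanup and teardown paths.
      HRegion.closeHRegion(region);
    }
  }
}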

Example 1: bootstrap

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
  LOG.info("BOOTSTRAP: creating hbase:meta region");
  try {
    // Bootstrapping, make sure blockcache is off.  Else, one will be
    // created here in bootstrap and it'll need to be cleaned up.  Better to
    // not make it in first place.  Turn off block caching for bootstrap.
    // Enable after.
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
    setInfoFamilyCachingForMeta(metaDescriptor, false);
    HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null, true, true);
    setInfoFamilyCachingForMeta(metaDescriptor, true);
    HRegion.closeHRegion(meta);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("bootstrap", e);
    throw e;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 21, Source: MasterFileSystem.java

Example 2: createHDFSRegionDir

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Creates, flushes, and closes a new region.
 */
public static HRegion createHDFSRegionDir(Configuration conf,
    HRegionInfo hri, HTableDescriptor htd) throws IOException {
  // Create HRegion
  Path root = FSUtils.getRootDir(conf);
  HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);

  // Close the new region to flush to disk. Close log file too.
  HRegion.closeHRegion(region);
  return region;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 14, Source: HBaseFsckRepair.java

Example 3: createRegion

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
HRegion createRegion(final HRegionInfo  hri, final Path rootdir, final Configuration c,
    final HTableDescriptor htd)
throws IOException {
  HRegion r = HRegion.createHRegion(hri, rootdir, c, htd);
  // The above call to create a region will create an wal file.  Each
  // log file create will also create a running thread to do syncing.  We need
  // to close out this log else we will have a running thread trying to sync
  // the file system continuously which is ugly when dfs is taken away at the
  // end of the test.
  HRegion.closeHRegion(r);
  return r;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: TestMasterFailover.java

Example 4: tearDown

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
@Override
public void tearDown() throws Exception {
  super.tearDown();
  for (int i = 0; i < sourceRegions.length; i++) {
    HRegion r = regions[i];
    if (r != null) {
      HRegion.closeHRegion(r);
    }
  }
  wals.close();
  TEST_UTIL.shutdownMiniCluster();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: TestMergeTool.java

Example 5: testMergeTool

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Test merge tool.
 * @throws Exception
 */
public void testMergeTool() throws Exception {
  // First verify we can read the rows from the source regions and that they
  // contain the right data.
  for (int i = 0; i < regions.length; i++) {
    for (int j = 0; j < rows[i].length; j++) {
      Get get = new Get(rows[i][j]);
      get.addFamily(FAMILY);
      Result result = regions[i].get(get);
      byte [] bytes =  CellUtil.cloneValue(result.rawCells()[0]);
      assertNotNull(bytes);
      assertTrue(Bytes.equals(bytes, rows[i][j]));
    }
    // Close the region and delete the log
    HRegion.closeHRegion(regions[i]);
  }
  WAL log = wals.getWAL(new byte[]{});
  // Merge Region 0 and Region 1
  HRegion merged = mergeAndVerify("merging regions 0 and 1 ",
    this.sourceRegions[0].getRegionNameAsString(),
    this.sourceRegions[1].getRegionNameAsString(), log, 2);

  // Merge the result of merging regions 0 and 1 with region 2
  merged = mergeAndVerify("merging regions 0+1 and 2",
    merged.getRegionInfo().getRegionNameAsString(),
    this.sourceRegions[2].getRegionNameAsString(), log, 3);

  // Merge the result of merging regions 0, 1 and 2 with region 3
  merged = mergeAndVerify("merging regions 0+1+2 and 3",
    merged.getRegionInfo().getRegionNameAsString(),
    this.sourceRegions[3].getRegionNameAsString(), log, 4);

  // Merge the result of merging regions 0, 1, 2 and 3 with region 4
  merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
    merged.getRegionInfo().getRegionNameAsString(),
    this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 41, Source: TestMergeTool.java

Example 6: createRegion

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
private HRegion createRegion(final HTableDescriptor desc,
    byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir)
throws IOException {
  HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
  HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration(), desc);
  LOG.info("Created region " + region.getRegionInfo().getRegionNameAsString());
  for(int i = firstRow; i < firstRow + nrows; i++) {
    Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
    put.setDurability(Durability.SKIP_WAL);
    put.add(COLUMN_NAME, null,  VALUE);
    region.put(put);
    if (i % 10000 == 0) {
      LOG.info("Flushing write #" + i);
      region.flush(true);
    }
  }
  HRegion.closeHRegion(region);
  return region;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: TestMergeTable.java

Example 7: setupMeta

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
protected void setupMeta(Path rootdir, final HRegion [] regions)
throws IOException {
  HRegion meta =
    HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, rootdir,
    UTIL.getConfiguration(), UTIL.getMetaTableDescriptor());
  for (HRegion r: regions) {
    HRegion.addRegionToMETA(meta, r);
  }
  HRegion.closeHRegion(meta);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 11, Source: TestMergeTable.java

Example 8: rebuildMeta

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Rebuilds meta from information in hdfs/fs.  Depends on configuration
 * settings passed into hbck constructor to point to a particular fs/dir.
 *
 * @param fix flag that determines if method should attempt to fix holes
 * @return true if successful, false if attempt failed.
 */
public boolean rebuildMeta(boolean fix) throws IOException,
    InterruptedException {

  // TODO check to make sure hbase is offline. (or at least the table
  // currently being worked on is off line)

  // Determine what's on HDFS
  LOG.info("Loading HBase regioninfo from HDFS...");
  loadHdfsRegionDirs(); // populating regioninfo table.

  int errs = errors.getErrorList().size();
  tablesInfo = loadHdfsRegionInfos(); // update tableInfos based on region info in fs.
  checkHdfsIntegrity(false, false);

  // make sure ok.
  if (errors.getErrorList().size() != errs) {
    // While in error state, iterate until no more fixes possible
    while(true) {
      fixes = 0;
      suggestFixes(tablesInfo);
      errors.clear();
      loadHdfsRegionInfos(); // update tableInfos based on region info in fs.
      checkHdfsIntegrity(shouldFixHdfsHoles(), shouldFixHdfsOverlaps());

      int errCount = errors.getErrorList().size();

      if (fixes == 0) {
        if (errCount > 0) {
          return false; // failed to fix problems.
        } else {
          break; // no fixes and no problems? drop out and fix stuff!
        }
      }
    }
  }

  // we can rebuild, move old meta out of the way and start
  LOG.info("HDFS regioninfo's seems good.  Sidelining old hbase:meta");
  Path backupDir = sidelineOldMeta();

  LOG.info("Creating new hbase:meta");
  HRegion meta = createNewMeta();

  // populate meta
  List<Put> puts = generatePuts(tablesInfo);
  if (puts == null) {
    LOG.fatal("Problem encountered when creating new hbase:meta entries.  " +
      "You may need to restore the previously sidelined hbase:meta");
    return false;
  }
  meta.batchMutate(puts.toArray(new Put[puts.size()]));
  HRegion.closeHRegion(meta);
  LOG.info("Success! hbase:meta table rebuilt.");
  LOG.info("Old hbase:meta is moved into " + backupDir);
  return true;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 64, Source: HBaseFsck.java

Example 9: closeRootAndMeta

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
protected void closeRootAndMeta() throws IOException {
  HRegion.closeHRegion(meta);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 4, Source: HBaseTestCase.java

Example 10: testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
@Test
public void testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches()
    throws Exception {
  HRegion region = null;
  try {
    int testIndex = 0;
    TEST_UTIL.startMiniZKCluster();
    final Server server = new MockServer(TEST_UTIL);
    HTableDescriptor htd = new HTableDescriptor(
        TableName.valueOf("testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches"));
    HRegionInfo hri = new HRegionInfo(htd.getTableName(),
        Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1));
    region = HRegion.createHRegion(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    assertNotNull(region);
    AssignmentManager am = Mockito.mock(AssignmentManager.class);
    RegionStates rsm = Mockito.mock(RegionStates.class);
    Mockito.doReturn(rsm).when(am).getRegionStates();
    when(rsm.isRegionInTransition(hri)).thenReturn(false);
    when(rsm.getRegionState(hri)).thenReturn(
      new RegionState(region.getRegionInfo(), RegionState.State.OPEN,
        System.currentTimeMillis(), server.getServerName()));
    // create a node with OPENED state
    zkw = HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
        region, server.getServerName());
    when(am.getTableStateManager()).thenReturn(new ZKTableStateManager(zkw));
    Stat stat = new Stat();
    String nodeName = ZKAssign.getNodeName(zkw, region.getRegionInfo()
        .getEncodedName());
    ZKUtil.getDataAndWatch(zkw, nodeName, stat);

    // use the version for the OpenedRegionHandler
    BaseCoordinatedStateManager csm = new ZkCoordinatedStateManager();
    csm.initialize(server);
    csm.start();

    OpenRegionCoordination orc = csm.getOpenRegionCoordination();
    ZkOpenRegionCoordination.ZkOpenRegionDetails zkOrd =
      new ZkOpenRegionCoordination.ZkOpenRegionDetails();
    zkOrd.setServerName(server.getServerName());
    zkOrd.setVersion(stat.getVersion());
    OpenedRegionHandler handler = new OpenedRegionHandler(server, am, region
        .getRegionInfo(), orc, zkOrd);
    // Once again overwrite the same znode so that the version changes.
    ZKAssign.transitionNode(zkw, region.getRegionInfo(), server
        .getServerName(), EventType.RS_ZK_REGION_OPENED,
        EventType.RS_ZK_REGION_OPENED, stat.getVersion());

    // Should not invoke assignmentmanager.regionOnline. If it is 
    // invoked as per current mocking it will throw null pointer exception.
    boolean expectedException = false;
    try {
      handler.process();
    } catch (Exception e) {
      expectedException = true;
    }
    assertFalse("The process method should not throw any exception.",
        expectedException);
    List<String> znodes = ZKUtil.listChildrenAndWatchForNewChildren(zkw,
        zkw.assignmentZNode);
    String regionName = znodes.get(0);
    assertEquals("The region should not be opened successfully.", regionName,
        region.getRegionInfo().getEncodedName());
  } finally {
    HRegion.closeHRegion(region);
    TEST_UTIL.shutdownMiniZKCluster();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 68, Source: TestOpenedRegionHandler.java

Example 11: testCoprocessorInterface

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
@Test
public void testCoprocessorInterface() throws IOException {
  TableName tableName = TableName.valueOf(name.getMethodName());
  byte [][] families = { fam1, fam2, fam3 };

  Configuration hc = initSplit();
  Region region = initHRegion(tableName, name.getMethodName(), hc,
    new Class<?>[]{CoprocessorImpl.class}, families);
  for (int i = 0; i < 3; i++) {
    HBaseTestCase.addContent(region, fam3);
    region.flush(true);
  }

  region.compact(false);

  byte [] splitRow = ((HRegion)region).checkSplit();

  assertNotNull(splitRow);
  Region [] regions = split(region, splitRow);
  for (int i = 0; i < regions.length; i++) {
    regions[i] = reopenRegion(regions[i], CoprocessorImpl.class);
  }
  HRegion.closeHRegion((HRegion)region);
  Coprocessor c = region.getCoprocessorHost().
    findCoprocessor(CoprocessorImpl.class.getName());

  // HBASE-4197
  Scan s = new Scan();
  RegionScanner scanner = regions[0].getCoprocessorHost().postScannerOpen(s, regions[0].getScanner(s));
  assertTrue(scanner instanceof CustomScanner);
  // this would throw an exception before HBASE-4197
  scanner.next(new ArrayList<Cell>());

  assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
  assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
  assertTrue(((CoprocessorImpl)c).wasOpened());
  assertTrue(((CoprocessorImpl)c).wasClosed());
  assertTrue(((CoprocessorImpl)c).wasFlushed());
  assertTrue(((CoprocessorImpl)c).wasCompacted());
  assertTrue(((CoprocessorImpl)c).wasSplit());

  for (int i = 0; i < regions.length; i++) {
    HRegion.closeHRegion((HRegion)regions[i]);
    c = region.getCoprocessorHost()
          .findCoprocessor(CoprocessorImpl.class.getName());
    assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
    assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
    assertTrue(((CoprocessorImpl)c).wasOpened());
    assertTrue(((CoprocessorImpl)c).wasClosed());
    assertTrue(((CoprocessorImpl)c).wasCompacted());
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 53, Source: TestCoprocessorInterface.java

Example 12: testFlushSequenceIdIsGreaterThanAllEditsInHFile

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Test flush for sure has a sequence id that is beyond the last edit appended.  We do this
 * by slowing appends in the background ring buffer thread while in foreground we call
 * flush.  The addition of the sync over HRegion in flush should fix an issue where flush was
 * returning before all of its appends had made it out to the WAL (HBASE-11109).
 * @throws IOException
 * @see HBASE-11109
 */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
  String testName = "testFlushSequenceIdIsGreaterThanAllEditsInHFile";
  final TableName tableName = TableName.valueOf(testName);
  final HRegionInfo hri = new HRegionInfo(tableName);
  final byte[] rowName = tableName.getName();
  final HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("f"));
  HRegion r = HRegion.createHRegion(hri, TEST_UTIL.getDefaultRootDirPath(),
    TEST_UTIL.getConfiguration(), htd);
  HRegion.closeHRegion(r);
  final int countPerFamily = 10;
  final MutableBoolean goslow = new MutableBoolean(false);
  // subclass and doctor a method.
  FSHLog wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDefaultRootDirPath(),
      testName, conf) {
    @Override
    void atHeadOfRingBufferEventHandlerAppend() {
      if (goslow.isTrue()) {
        Threads.sleep(100);
        LOG.debug("Sleeping before appending 100ms");
      }
      super.atHeadOfRingBufferEventHandlerAppend();
    }
  };
  HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(),
    TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
  EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
  try {
    List<Put> puts = null;
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      puts =
        TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
    }

    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());

    // Construct a WALEdit and add it a few times to the WAL.
    WALEdit edits = new WALEdit();
    for (Put p: puts) {
      CellScanner cs = p.cellScanner();
      while (cs.advance()) {
        edits.add(cs.current());
      }
    }
    // Add any old cluster id.
    List<UUID> clusterIds = new ArrayList<UUID>();
    clusterIds.add(UUID.randomUUID());
    // Now make appends run slow.
    goslow.setValue(true);
    for (int i = 0; i < countPerFamily; i++) {
      final HRegionInfo info = region.getRegionInfo();
      final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC());
      wal.append(htd, info, logkey, edits, true);
    }
    region.flush(true);
    // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
    long currentSequenceId = region.getSequenceId();
    // Now release the appends
    goslow.setValue(false);
    synchronized (goslow) {
      goslow.notifyAll();
    }
    assertTrue(currentSequenceId >= region.getSequenceId());
  } finally {
    region.close(true);
    wal.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 82, Source: TestFSHLog.java

Example 13: testFailedFlushAborts

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Test that if we fail a flush, abort gets set on close.
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-4270">HBASE-4270</a>
 * @throws IOException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test public void testFailedFlushAborts()
throws IOException, NodeExistsException, KeeperException {
  final Server server = new MockServer(HTU, false);
  final RegionServerServices rss = HTU.createMockRegionServerService();
  HTableDescriptor htd = TEST_HTD;
  final HRegionInfo hri =
    new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW,
      HConstants.EMPTY_END_ROW);
  HRegion region = HTU.createLocalHRegion(hri,  htd);
  try {
    assertNotNull(region);
    // Spy on the region so can throw exception when close is called.
    HRegion spy = Mockito.spy(region);
    final boolean abort = false;
    Mockito.when(spy.close(abort)).
    thenThrow(new IOException("Mocked failed close!"));
    // The CloseRegionHandler will try to get an HRegion that corresponds
    // to the passed hri -- so insert the region into the online region Set.
    rss.addToOnlineRegions(spy);
    // Assert the Server is NOT stopped before we call close region.
    assertFalse(server.isStopped());

    ZkCoordinatedStateManager consensusProvider = new ZkCoordinatedStateManager();
    consensusProvider.initialize(server);
    consensusProvider.start();

    ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd =
      new ZkCloseRegionCoordination.ZkCloseRegionDetails();
    zkCrd.setPublishStatusInZk(false);
    zkCrd.setExpectedVersion(-1);

    CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false,
          consensusProvider.getCloseRegionCoordination(), zkCrd);
    boolean throwable = false;
    try {
      handler.process();
    } catch (Throwable t) {
      throwable = true;
    } finally {
      assertTrue(throwable);
      // Abort calls stop so stopped flag should be set.
      assertTrue(server.isStopped());
    }
  } finally {
    HRegion.closeHRegion(region);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 55, Source: TestCloseRegionHandler.java

Example 14: testRegionServerAbortionDueToFailureTransitioningToOpened

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Test the openregionhandler can deal with perceived failure of transitioning to OPENED state
 * due to intermittent zookeeper malfunctioning.
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-9387">HBASE-9387</a>
 * @throws IOException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test
public void testRegionServerAbortionDueToFailureTransitioningToOpened()
    throws IOException, NodeExistsException, KeeperException {
  final Server server = new MockServer(HTU);
  final RegionServerServices rss = HTU.createMockRegionServerService();

  HTableDescriptor htd = TEST_HTD;
  final HRegionInfo hri = TEST_HRI;
  HRegion region =
       HRegion.createHRegion(hri, HTU.getDataTestDir(), HTU
          .getConfiguration(), htd);
  assertNotNull(region);
  try {

    ZkCoordinatedStateManager csm = new ZkCoordinatedStateManager();
    csm.initialize(server);
    csm.start();

    ZkOpenRegionCoordination.ZkOpenRegionDetails zkCrd =
      new ZkOpenRegionCoordination.ZkOpenRegionDetails();
    zkCrd.setServerName(server.getServerName());

    ZkOpenRegionCoordination openRegionCoordination =
      new ZkOpenRegionCoordination(csm, server.getZooKeeper()) {
      @Override
      public boolean transitionToOpened(final HRegion r, OpenRegionDetails ord)
          throws IOException {
        // remove znode simulating intermittent zookeeper connection issue
        ZooKeeperWatcher zkw = server.getZooKeeper();
        String node = ZKAssign.getNodeName(zkw, hri.getEncodedName());
        try {
          ZKUtil.deleteNodeFailSilent(zkw, node);
        } catch (KeeperException e) {
          throw new RuntimeException("Ugh failed delete of " + node, e);
        }
        // then try to transition to OPENED
        return super.transitionToOpened(r, ord);
      }
    };

    OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd,
      -1, openRegionCoordination, zkCrd);
    rss.getRegionsInTransitionInRS().put(
      hri.getEncodedNameAsBytes(), Boolean.TRUE);
    // Call process without first creating OFFLINE region in zk, see if
    // exception or just quiet return (expected).
    handler.process();
    rss.getRegionsInTransitionInRS().put(
      hri.getEncodedNameAsBytes(), Boolean.TRUE);
    ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
    // Call process again but this time yank the zk znode out from under it
    // post OPENING; again will expect it to come back w/o NPE or exception.
    handler.process();
  } catch (IOException ioe) {
  } finally {
    HRegion.closeHRegion(region);
  }
  // Region server is expected to abort due to OpenRegionHandler perceiving transitioning
  // to OPENED as failed
  // This was corresponding to the second handler.process() call above.
  assertTrue("region server should have aborted", server.isAborted());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 71, Source: TestOpenRegionHandler.java

Example 15: tearDown

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
@After
public void tearDown() throws Exception {
  HRegion.closeHRegion(this.region);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 5, Source: TestDependentColumnFilter.java


Note: the org.apache.hadoop.hbase.regionserver.HRegion.closeHRegion method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.