

Java HRegion.getRegionInfo Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.HRegion.getRegionInfo. If you have been wondering how exactly HRegion.getRegionInfo is used, or are looking for concrete examples of it, the hand-picked code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.HRegion.


Nine code examples of the HRegion.getRegionInfo method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
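
Before diving into the examples, here is a minimal sketch of how getRegionInfo is typically called. The describeRegion helper below is hypothetical (it does not come from any of the projects quoted here); it merely strings together the accessors that recur in the examples and assumes it is handed an already-opened HRegion.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.HRegion;

// Hypothetical helper: prints the basic metadata that HRegion.getRegionInfo exposes.
// Assumes `region` is an HRegion that has already been opened.
public static void describeRegion(HRegion region) {
  // getRegionInfo() returns the HRegionInfo describing this region.
  HRegionInfo info = region.getRegionInfo();

  String encodedName = info.getEncodedName();         // short hashed name, e.g. used for znode paths
  String fullName = info.getRegionNameAsString();     // human-readable full region name
  byte[] startKey = info.getStartKey();               // first row key served by this region
  byte[] encodedBytes = info.getEncodedNameAsBytes(); // encoded name as bytes, e.g. for a master move()

  System.out.println("Region " + fullName + " (encoded " + encodedName + "), startKey length="
      + startKey.length + ", encoded name length=" + encodedBytes.length);
}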

Example 1: startSplitTransaction

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Creates a new ephemeral node in the PENDING_SPLIT state for the specified region. Create it
 * ephemeral in case regionserver dies mid-split.
 * <p>
 * Does not transition nodes from other states. If a node already exists for this region, an
 * Exception will be thrown.
 * @param parent region to be created as offline
 * @param serverName server event originates from
 * @param hri_a daughter region
 * @param hri_b daughter region
 * @throws IOException
 */
@Override
public void startSplitTransaction(HRegion parent, ServerName serverName, HRegionInfo hri_a,
    HRegionInfo hri_b) throws IOException {
  HRegionInfo region = parent.getRegionInfo();
  try {
    LOG.debug(watcher.prefix("Creating ephemeral node for " + region.getEncodedName()
        + " in PENDING_SPLIT state"));
    byte[] payload = HRegionInfo.toDelimitedByteArray(hri_a, hri_b);
    RegionTransition rt =
        RegionTransition.createRegionTransition(RS_ZK_REQUEST_REGION_SPLIT,
          region.getRegionName(), serverName, payload);
    String node = ZKAssign.getNodeName(watcher, region.getEncodedName());
    if (!ZKUtil.createEphemeralNodeAndWatch(watcher, node, rt.toByteArray())) {
      throw new IOException("Failed create of ephemeral " + node);
    }
  } catch (KeeperException e) {
    throw new IOException("Failed creating PENDING_SPLIT znode on "
        + parent.getRegionInfo().getRegionNameAsString(), e);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 38, Source file: ZKSplitTransactionCoordination.java

Example 2: createRegion

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Create new set of regions on the specified file-system.
 * @param conf {@link Configuration}
 * @param rootDir Root directory for HBase instance
 * @param tableDir table directory
 * @param hTableDescriptor description of the table
 * @param newRegion {@link HRegionInfo} that describes the region to create
 * @param task {@link RegionFillTask} custom code to populate region after creation
 * @throws IOException
 */
public static HRegionInfo createRegion(final Configuration conf, final Path rootDir,
    final Path tableDir, final HTableDescriptor hTableDescriptor, final HRegionInfo newRegion,
    final RegionFillTask task) throws IOException {
  // 1. Create HRegion
  HRegion region = HRegion.createHRegion(newRegion,
    rootDir, tableDir, conf, hTableDescriptor, null,
    false, true);
  try {
    // 2. Custom user code to interact with the created region
    if (task != null) {
      task.fillRegion(region);
    }
  } finally {
    // 3. Close the new region to flush to disk. Close log file too.
    region.close();
  }
  return region.getRegionInfo();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 29, Source file: ModifyRegionUtils.java

Example 3: runwarmup

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
protected void runwarmup() throws InterruptedException {
  Thread thread = new Thread(new Runnable() {
    @Override
    public void run() {
      HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
      HRegion region = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLENAME).get(0);
      HRegionInfo info = region.getRegionInfo();

      try {
        HTableDescriptor htd = table.getTableDescriptor();
        for (int i = 0; i < 10; i++) {
          warmupHRegion(info, htd, rs.getWAL(info), rs.getConfiguration(), rs, null);
        }

      } catch (IOException ie) {
        LOG.error("Failed warming up region " + info.getRegionNameAsString(), ie);
      }
    }
  });
  thread.start();
  thread.join();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source file: TestWarmupRegion.java

Example 4: transitionToOpened

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * @param r Region we're working on.
 * @return whether znode is successfully transitioned to OPENED state.
 * @throws java.io.IOException
 */
@Override
public boolean transitionToOpened(final HRegion r, OpenRegionDetails ord) throws IOException {
  ZkOpenRegionDetails zkOrd = (ZkOpenRegionDetails) ord;

  boolean result = false;
  HRegionInfo hri = r.getRegionInfo();
  final String name = hri.getRegionNameAsString();
  // Finally, Transition ZK node to OPENED
  try {
    if (ZKAssign.transitionNodeOpened(watcher, hri,
      zkOrd.getServerName(), zkOrd.getVersion()) == -1) {
      String warnMsg = "Completed the OPEN of region " + name +
        " but when transitioning from " + " OPENING to OPENED ";
      try {
        String node = ZKAssign.getNodeName(watcher, hri.getEncodedName());
        if (ZKUtil.checkExists(watcher, node) < 0) {
          // the znode is gone, so something else must have removed it; abort
          coordination.getServer().abort(warnMsg + "the znode disappeared", null);
        } else {
          LOG.warn(warnMsg + "got a version mismatch, someone else clashed; " +
            "so now unassigning -- closing region on server: " + zkOrd.getServerName());
        }
      } catch (KeeperException ke) {
        coordination.getServer().abort(warnMsg, ke);
      }
    } else {
      LOG.debug("Transitioned " + r.getRegionInfo().getEncodedName() +
        " to OPENED in zk on " + zkOrd.getServerName());
      result = true;
    }
  } catch (KeeperException e) {
    LOG.error("Failed transitioning node " + name +
      " from OPENING to OPENED -- closing region", e);
  }
  return result;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 42, Source file: ZkOpenRegionCoordination.java

Example 5: merge

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
private HRegion merge(final HTableDescriptor htd, HRegion meta,
                      HRegionInfo info1, HRegionInfo info2)
throws IOException {
  if (info1 == null) {
    throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
        Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
  }
  if (info2 == null) {
    throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
        Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
  }
  HRegion merged = null;
  HRegion r1 = HRegion.openHRegion(info1, htd, utils.getLog(info1), getConf());
  try {
    HRegion r2 = HRegion.openHRegion(info2, htd, utils.getLog(info2), getConf());
    try {
      merged = HRegion.merge(r1, r2);
    } finally {
      if (!r2.isClosed()) {
        r2.close();
      }
    }
  } finally {
    if (!r1.isClosed()) {
      r1.close();
    }
  }

  // Remove the old regions from meta.
  // HRegion.merge has already deleted their files

  removeRegionFromMeta(meta, info1);
  removeRegionFromMeta(meta, info2);

  this.mergeInfo = merged.getRegionInfo();
  return merged;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 38, Source file: Merge.java

Example 6: testWarmup

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Basic client-side validation of HBASE-4536.
 */
@Test
public void testWarmup() throws Exception {
  int serverid = 0;
  HRegion region = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLENAME).get(0);
  HRegionInfo info = region.getRegionInfo();
  runwarmup();
  for (int i = 0; i < 10; i++) {
    HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(serverid);
    byte [] destName = Bytes.toBytes(rs.getServerName().toString());
    TEST_UTIL.getMiniHBaseCluster().getMaster().move(info.getEncodedNameAsBytes(), destName);
    serverid = (serverid + 1) % 2;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 17, Source file: TestWarmupRegion.java

Example 7: testInterClusterReplication

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
@Test (timeout=120000)
public void testInterClusterReplication() throws Exception {
  final String id = "testInterClusterReplication";

  List<HRegion> regions = utility1.getHBaseCluster().getRegions(tableName);
  int totEdits = 0;

  // Make sure edits are spread across regions because we do region based batching
  // before shipping edits.
  for(HRegion region: regions) {
    HRegionInfo hri = region.getRegionInfo();
    byte[] row = hri.getStartKey();
    for (int i = 0; i < 100; i++) {
      if (row.length > 0) {
        Put put = new Put(row);
        put.addColumn(famName, row, row);
        region.put(put);
        totEdits++;
      }
    }
  }

  admin.addPeer(id,
      new ReplicationPeerConfig().setClusterKey(ZKConfig.getZooKeeperClusterKey(conf2))
          .setReplicationEndpointImpl(InterClusterReplicationEndpointForTest.class.getName()),
      null);

  final int numEdits = totEdits;
  Waiter.waitFor(conf1, 30000, new Waiter.ExplainingPredicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return InterClusterReplicationEndpointForTest.replicateCount.get() == numEdits;
    }
    @Override
    public String explainFailure() throws Exception {
      String failure = "Failed to replicate all edits, expected = " + numEdits
          + " replicated = " + InterClusterReplicationEndpointForTest.replicateCount.get();
      return failure;
    }
  });

  admin.removePeer("testInterClusterReplication");
  utility1.deleteTableData(tableName);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 45, Source file: TestReplicationEndpoint.java

Example 8: testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
@Test
public void testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches()
    throws Exception {
  HRegion region = null;
  try {
    int testIndex = 0;
    TEST_UTIL.startMiniZKCluster();
    final Server server = new MockServer(TEST_UTIL);
    HTableDescriptor htd = new HTableDescriptor(
        TableName.valueOf("testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches"));
    HRegionInfo hri = new HRegionInfo(htd.getTableName(),
        Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1));
    region = HRegion.createHRegion(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    assertNotNull(region);
    AssignmentManager am = Mockito.mock(AssignmentManager.class);
    RegionStates rsm = Mockito.mock(RegionStates.class);
    Mockito.doReturn(rsm).when(am).getRegionStates();
    when(rsm.isRegionInTransition(hri)).thenReturn(false);
    when(rsm.getRegionState(hri)).thenReturn(
      new RegionState(region.getRegionInfo(), RegionState.State.OPEN,
        System.currentTimeMillis(), server.getServerName()));
    // create a node with OPENED state
    zkw = HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
        region, server.getServerName());
    when(am.getTableStateManager()).thenReturn(new ZKTableStateManager(zkw));
    Stat stat = new Stat();
    String nodeName = ZKAssign.getNodeName(zkw, region.getRegionInfo()
        .getEncodedName());
    ZKUtil.getDataAndWatch(zkw, nodeName, stat);

    // use the version for the OpenedRegionHandler
    BaseCoordinatedStateManager csm = new ZkCoordinatedStateManager();
    csm.initialize(server);
    csm.start();

    OpenRegionCoordination orc = csm.getOpenRegionCoordination();
    ZkOpenRegionCoordination.ZkOpenRegionDetails zkOrd =
      new ZkOpenRegionCoordination.ZkOpenRegionDetails();
    zkOrd.setServerName(server.getServerName());
    zkOrd.setVersion(stat.getVersion());
    OpenedRegionHandler handler = new OpenedRegionHandler(server, am, region
        .getRegionInfo(), orc, zkOrd);
    // Once again overwrite the same znode so that the version changes.
    ZKAssign.transitionNode(zkw, region.getRegionInfo(), server
        .getServerName(), EventType.RS_ZK_REGION_OPENED,
        EventType.RS_ZK_REGION_OPENED, stat.getVersion());

    // Should not invoke assignmentmanager.regionOnline. If it is 
    // invoked as per current mocking it will throw null pointer exception.
    boolean expectedException = false;
    try {
      handler.process();
    } catch (Exception e) {
      expectedException = true;
    }
    assertFalse("The process method should not throw any exception.",
        expectedException);
    List<String> znodes = ZKUtil.listChildrenAndWatchForNewChildren(zkw,
        zkw.assignmentZNode);
    String regionName = znodes.get(0);
    assertEquals("The region should not be opened successfully.", regionName,
        region.getRegionInfo().getEncodedName());
  } finally {
    HRegion.closeHRegion(region);
    TEST_UTIL.shutdownMiniZKCluster();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 68, Source file: TestOpenedRegionHandler.java

Example 9: testFlushSequenceIdIsGreaterThanAllEditsInHFile

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Tests that a flush is guaranteed to get a sequence id beyond the last edit appended. We do
 * this by slowing appends in the background ring-buffer thread while calling flush in the
 * foreground. The addition of the sync over HRegion in flush should fix an issue where flush
 * was returning before all of its appends had made it out to the WAL (HBASE-11109).
 * @throws IOException
 * @see HBASE-11109
 */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
  String testName = "testFlushSequenceIdIsGreaterThanAllEditsInHFile";
  final TableName tableName = TableName.valueOf(testName);
  final HRegionInfo hri = new HRegionInfo(tableName);
  final byte[] rowName = tableName.getName();
  final HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("f"));
  HRegion r = HRegion.createHRegion(hri, TEST_UTIL.getDefaultRootDirPath(),
    TEST_UTIL.getConfiguration(), htd);
  HRegion.closeHRegion(r);
  final int countPerFamily = 10;
  final MutableBoolean goslow = new MutableBoolean(false);
  // subclass and doctor a method.
  FSHLog wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDefaultRootDirPath(),
      testName, conf) {
    @Override
    void atHeadOfRingBufferEventHandlerAppend() {
      if (goslow.isTrue()) {
        Threads.sleep(100);
        LOG.debug("Sleeping before appending 100ms");
      }
      super.atHeadOfRingBufferEventHandlerAppend();
    }
  };
  HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(),
    TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
  EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
  try {
    List<Put> puts = null;
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      puts =
        TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
    }

    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());

    // Construct a WALEdit and add it a few times to the WAL.
    WALEdit edits = new WALEdit();
    for (Put p: puts) {
      CellScanner cs = p.cellScanner();
      while (cs.advance()) {
        edits.add(cs.current());
      }
    }
    // Add any old cluster id.
    List<UUID> clusterIds = new ArrayList<UUID>();
    clusterIds.add(UUID.randomUUID());
    // Now make appends run slow.
    goslow.setValue(true);
    for (int i = 0; i < countPerFamily; i++) {
      final HRegionInfo info = region.getRegionInfo();
      final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC());
      wal.append(htd, info, logkey, edits, true);
    }
    region.flush(true);
    // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
    long currentSequenceId = region.getSequenceId();
    // Now release the appends
    goslow.setValue(false);
    synchronized (goslow) {
      goslow.notifyAll();
    }
    assertTrue(currentSequenceId >= region.getSequenceId());
  } finally {
    region.close(true);
    wal.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 82, Source file: TestFSHLog.java


Note: The org.apache.hadoop.hbase.regionserver.HRegion.getRegionInfo method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce this article without permission.