This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread.getRegionServer. If you are unsure what RegionServerThread.getRegionServer does or how to use it, the curated code examples below may help. You can also read further about the enclosing class, org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread.
The sections below show 7 code examples of RegionServerThread.getRegionServer, ordered by popularity by default.
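Before the examples, here is a minimal sketch of the pattern they all share: iterate the mini cluster's region server threads and call getRegionServer() on each RegionServerThread to reach the HRegionServer instance running inside it. It assumes a test that has already started a mini cluster through an HBaseTestingUtility; the names testUtil and logRegionServerNames are illustrative, not part of the HBase API.

import java.util.List;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;

public class RegionServerThreadExample {

  // Illustrative helper: assumes testUtil.startMiniCluster() has already been called.
  static void logRegionServerNames(HBaseTestingUtility testUtil) {
    MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
    List<RegionServerThread> threads = cluster.getLiveRegionServerThreads();
    for (RegionServerThread rst : threads) {
      // getRegionServer() returns the HRegionServer running inside this thread.
      HRegionServer rs = rst.getRegionServer();
      System.out.println("live region server: " + rs.getServerName());
    }
  }
}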
Example 1: populateDataInTable
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; // import of the package/class this method depends on
void populateDataInTable(int nrows, String fname) throws Exception {
  byte[] family = Bytes.toBytes(fname);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  assertEquals(NUM_RS, rsts.size());
  for (RegionServerThread rst : rsts) {
    HRegionServer hrs = rst.getRegionServer();
    List<HRegionInfo> hris = hrs.getOnlineRegions();
    for (HRegionInfo hri : hris) {
      if (hri.isMetaTable()) {
        continue;
      }
      LOG.debug("adding data to rs = " + rst.getName() +
          " region = " + hri.getRegionNameAsString());
      HRegion region = hrs.getOnlineRegion(hri.getRegionName());
      assertTrue(region != null);
      putData(region, hri.getStartKey(), nrows, Bytes.toBytes("q"), family);
    }
  }
}
Example 2: populateDataInTable
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; // import of the package/class this method depends on
void populateDataInTable(int nrows, String fname) throws Exception {
  byte[] family = Bytes.toBytes(fname);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  assertEquals(NUM_RS, rsts.size());
  for (RegionServerThread rst : rsts) {
    HRegionServer hrs = rst.getRegionServer();
    List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
    for (HRegionInfo hri : hris) {
      if (hri.getTable().isSystemTable()) {
        continue;
      }
      LOG.debug("adding data to rs = " + rst.getName() +
          " region = " + hri.getRegionNameAsString());
      HRegion region = hrs.getOnlineRegion(hri.getRegionName());
      assertTrue(region != null);
      putData(region, hri.getStartKey(), nrows, Bytes.toBytes("q"), family);
    }
  }
}
Example 3: populateDataInTable
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; // import of the package/class this method depends on
void populateDataInTable(int nrows, String fname) throws Exception {
  byte[] family = Bytes.toBytes(fname);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  assertEquals(NUM_RS, rsts.size());
  for (RegionServerThread rst : rsts) {
    HRegionServer hrs = rst.getRegionServer();
    List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs);
    for (HRegionInfo hri : hris) {
      if (hri.getTable().isSystemTable()) {
        continue;
      }
      LOG.debug("adding data to rs = " + rst.getName() +
          " region = " + hri.getRegionNameAsString());
      HRegion region = hrs.getOnlineRegion(hri.getRegionName());
      assertTrue(region != null);
      putData(region, hri.getStartKey(), nrows, Bytes.toBytes("q"), family);
    }
  }
}
Example 4: getOtherRegionServer
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; // import of the package/class this method depends on
/**
* Find regionserver other than the one passed.
* Can't rely on indexes into list of regionservers since crashed servers
* occupy an index.
 * @param cluster the mini cluster to search
 * @param notThisOne the region server to exclude
* @return A regionserver that is not <code>notThisOne</code> or null if none
* found
*/
private HRegionServer getOtherRegionServer(final MiniHBaseCluster cluster,
    final HRegionServer notThisOne) {
  for (RegionServerThread rst : cluster.getRegionServerThreads()) {
    HRegionServer hrs = rst.getRegionServer();
    if (hrs.getServerName().equals(notThisOne.getServerName())) continue;
    if (hrs.isStopping() || hrs.isStopped()) continue;
    return hrs;
  }
  return null;
}
Example 5: evictHFileCache
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; // import of the package/class this method depends on
private void evictHFileCache(final Path hfile) throws Exception {
  for (RegionServerThread rst : UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
    HRegionServer rs = rst.getRegionServer();
    rs.getCacheConfig().getBlockCache().evictBlocksByHfileName(hfile.getName());
  }
  Thread.sleep(6000);
}
Example 6: testShouldCheckMasterFailOverWhenMETAIsInOpenedState
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; // import of the package/class this method depends on
@Test (timeout=180000)
public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
    throws Exception {
  LOG.info("Starting testShouldCheckMasterFailOverWhenMETAIsInOpenedState");
  final int NUM_MASTERS = 1;
  final int NUM_RS = 2;
  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.master.info.port", -1);
  conf.setBoolean("hbase.assignment.usezk", true);
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  // Find regionserver carrying meta.
  List<RegionServerThread> regionServerThreads =
      cluster.getRegionServerThreads();
  Region metaRegion = null;
  HRegionServer metaRegionServer = null;
  for (RegionServerThread regionServerThread : regionServerThreads) {
    HRegionServer regionServer = regionServerThread.getRegionServer();
    metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
    regionServer.abort("");
    if (null != metaRegion) {
      metaRegionServer = regionServer;
      break;
    }
  }
  TEST_UTIL.shutdownMiniHBaseCluster();
  // Create a ZKW to use in the test
  ZooKeeperWatcher zkw =
      HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
          metaRegion, metaRegionServer.getServerName());
  LOG.info("Starting cluster for second time");
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, NUM_RS);
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  while (!master.isInitialized()) {
    Thread.sleep(100);
  }
  // Failover should be completed, now wait for no RIT
  log("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
  zkw.close();
  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}
Example 7: testShouldCheckMasterFailOverWhenMETAIsInOpenedState
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; // import of the package/class this method depends on
@Test (timeout=180000)
public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
    throws Exception {
  LOG.info("Starting testShouldCheckMasterFailOverWhenMETAIsInOpenedState");
  final int NUM_MASTERS = 1;
  final int NUM_RS = 2;
  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.master.info.port", -1);
  conf.setBoolean("hbase.assignment.usezk", true);
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  // Find regionserver carrying meta.
  List<RegionServerThread> regionServerThreads =
      cluster.getRegionServerThreads();
  HRegion metaRegion = null;
  HRegionServer metaRegionServer = null;
  for (RegionServerThread regionServerThread : regionServerThreads) {
    HRegionServer regionServer = regionServerThread.getRegionServer();
    metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
    regionServer.abort("");
    if (null != metaRegion) {
      metaRegionServer = regionServer;
      break;
    }
  }
  TEST_UTIL.shutdownMiniHBaseCluster();
  // Create a ZKW to use in the test
  ZooKeeperWatcher zkw =
      HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
          metaRegion, metaRegionServer.getServerName());
  LOG.info("Starting cluster for second time");
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, NUM_RS);
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  while (!master.isInitialized()) {
    Thread.sleep(100);
  }
  // Failover should be completed, now wait for no RIT
  log("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
  zkw.close();
  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}