本文整理汇总了Java中org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos类的典型用法代码示例。如果您正苦于以下问题:Java RegionServerStatusProtos类的具体用法?Java RegionServerStatusProtos怎么用?Java RegionServerStatusProtos使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
RegionServerStatusProtos类属于org.apache.hadoop.hbase.protobuf.generated包,在下文中一共展示了RegionServerStatusProtos类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testClusterRequests
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
@Test(timeout = 300000)
public void testClusterRequests() throws Exception {
  // Push a fabricated region-server report at the master and verify that the
  // cluster_requests metric picks up the reported request total.
  HRegionServer regionServer = cluster.getRegionServer(0);
  RegionServerStatusProtos.RegionServerReportRequest.Builder report =
      RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
  report.setServer(ProtobufUtil.toServerName(regionServer.getServerName()));
  // Fabricated load claiming 10000 total requests.
  ClusterStatusProtos.ServerLoad fakeLoad = ClusterStatusProtos.ServerLoad.newBuilder()
      .setTotalNumberOfRequests(10000)
      .build();
  master.getMetrics().getMetricsSource().init();
  report.setLoad(fakeLoad);
  master.regionServerReport(null, report.build());
  metricsHelper.assertCounter("cluster_requests", 10000,
      master.getMetrics().getMetricsSource());
  master.stopMaster();
}
示例2: testClusterRequests
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
@Test(timeout = 300000)
public void testClusterRequests() throws Exception {
  // Send a fabricated region-server report to the master and check that the
  // cluster_requests metric reflects the reported request total.
  HRegionServer regionServer = cluster.getRegionServer(0);
  RegionServerStatusProtos.RegionServerReportRequest.Builder report =
      RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
  report.setServer(ProtobufUtil.toServerName(regionServer.getServerName()));
  // Fabricated load claiming 10000 total requests.
  HBaseProtos.ServerLoad fakeLoad = HBaseProtos.ServerLoad.newBuilder()
      .setTotalNumberOfRequests(10000)
      .build();
  master.getMetrics().getMetricsSource().init();
  report.setLoad(fakeLoad);
  master.regionServerReport(null, report.build());
  metricsHelper.assertCounter("cluster_requests", 10000,
      master.getMetrics().getMetricsSource());
  master.stopMaster();
}
示例3: getPriority
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
public int getPriority(RPCProtos.RequestHeader header, Message param, User user) {
  // Deliberately duplicated from the base class: doing the annotatedQos lookup
  // once here avoids paying for it twice on every single RPC request.
  int annotated = getAnnotatedPriority(header);
  if (annotated >= 0) {
    return annotated;
  }
  // While meta is in transition, every other region-state report is blocked on
  // editing meta. Reports that involve meta therefore get their own priority
  // lane so the report that opens meta can always run; without this the
  // cluster can deadlock.
  if (!(param instanceof RegionServerStatusProtos.ReportRegionStateTransitionRequest)) {
    // Everything else goes through the normal prioritization rules.
    return getBasePriority(header, param);
  }
  // Regions are moving; check whether any of them belongs to a system table.
  RegionServerStatusProtos.ReportRegionStateTransitionRequest transitionRequest =
      (RegionServerStatusProtos.ReportRegionStateTransitionRequest) param;
  for (RegionServerStatusProtos.RegionStateTransition transition
      : transitionRequest.getTransitionList()) {
    if (transition.getRegionInfoList() != null) {
      for (HBaseProtos.RegionInfo info : transition.getRegionInfoList()) {
        if (ProtobufUtil.toTableName(info.getTableName()).isSystemTable()) {
          return HConstants.SYSTEMTABLE_QOS;
        }
      }
    }
  }
  return HConstants.NORMAL_QOS;
}
示例4: isWriteRequest
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
private boolean isWriteRequest(final RequestHeader header, final Message param) {
  // TODO: Is there a better way to do this?
  if (param instanceof MutateRequest) {
    return true;
  }
  if (param instanceof MultiRequest) {
    // A multi is a write iff any of its region actions carries a mutation.
    for (RegionAction regionAction : ((MultiRequest) param).getRegionActionList()) {
      for (Action action : regionAction.getActionList()) {
        if (action.hasMutation()) {
          return true;
        }
      }
    }
  }
  // Below here are methods for master. It's a pretty brittle version of this.
  // Not sure that master actually needs a read/write queue since 90% of requests to
  // master are writing to status or changing the meta table.
  // All other read requests are admin generated and can be processed whenever.
  // However changing that would require a pretty drastic change and should be done for
  // the next major release and not as a fix for HBASE-14239
  return param instanceof RegionServerStatusProtos.ReportRegionStateTransitionRequest
      || param instanceof RegionServerStatusProtos.RegionServerStartupRequest
      || param instanceof RegionServerStatusProtos.RegionServerReportRequest;
}
示例5: testClusterRequests
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
@Test(timeout = 300000)
public void testClusterRequests() throws Exception {
  // Feed the master fabricated region-server reports and verify that the
  // cluster_requests counter follows the reported totals and that re-sending
  // the same report leaves the counter unchanged.
  RegionServerStatusProtos.RegionServerReportRequest.Builder report =
      RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
  ServerName serverName = cluster.getMaster(0).getServerName();
  report.setServer(ProtobufUtil.toServerName(serverName));
  MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
  masterSource.init();
  // First report: 10000 total requests.
  report.setLoad(ClusterStatusProtos.ServerLoad.newBuilder()
      .setTotalNumberOfRequests(10000)
      .build());
  master.getMasterRpcServices().regionServerReport(null, report.build());
  metricsHelper.assertCounter("cluster_requests", 10000, masterSource);
  // Second report raises the total to 15000.
  report.setLoad(ClusterStatusProtos.ServerLoad.newBuilder()
      .setTotalNumberOfRequests(15000)
      .build());
  master.getMasterRpcServices().regionServerReport(null, report.build());
  metricsHelper.assertCounter("cluster_requests", 15000, masterSource);
  // Re-sending the identical report must not change the counter.
  master.getMasterRpcServices().regionServerReport(null, report.build());
  metricsHelper.assertCounter("cluster_requests", 15000, masterSource);
  master.stopMaster();
}
示例6: testRegionInTransition
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
@Test
public void testRegionInTransition() throws IOException {
  // Exercise the QoS function for ReportRegionStateTransition: a request that
  // includes a meta-region transition must be classified SYSTEMTABLE_QOS,
  // while one that only moves a user region gets NORMAL_QOS.
  HBaseProtos.RegionInfo metaRegion = HRegionInfo.convert(HRegionInfo.FIRST_META_REGIONINFO);
  HBaseProtos.RegionInfo userRegion = HRegionInfo.convert(
      new HRegionInfo(TableName.valueOf("test:table"),
          Bytes.toBytes("a"), Bytes.toBytes("b"), false));
  RegionServerStatusProtos.RegionStateTransition closedMeta = RegionServerStatusProtos
      .RegionStateTransition.newBuilder()
      .addRegionInfo(metaRegion)
      .setTransitionCode(RegionServerStatusProtos.RegionStateTransition.TransitionCode.CLOSED)
      .build();
  RegionServerStatusProtos.RegionStateTransition closedUser = RegionServerStatusProtos
      .RegionStateTransition.newBuilder()
      .addRegionInfo(userRegion)
      .setTransitionCode(RegionServerStatusProtos.RegionStateTransition.TransitionCode.CLOSED)
      .build();
  // Request mixing a user-region and a meta-region transition.
  RegionServerStatusProtos.ReportRegionStateTransitionRequest withMeta =
      RegionServerStatusProtos.ReportRegionStateTransitionRequest.newBuilder()
          .setServer(ProtobufUtil.toServerName(ServerName.valueOf("locahost:60020", 100)))
          .addTransition(closedUser)
          .addTransition(closedMeta).build();
  // Request touching only the user region.
  RegionServerStatusProtos.ReportRegionStateTransitionRequest userOnly =
      RegionServerStatusProtos.ReportRegionStateTransitionRequest.newBuilder()
          .setServer(ProtobufUtil.toServerName(ServerName.valueOf("locahost:60020", 100)))
          .addTransition(closedUser).build();
  final String reportFuncName = "ReportRegionStateTransition";
  checkMethod(conf, reportFuncName, HConstants.SYSTEMTABLE_QOS, qosFunction, withMeta);
  checkMethod(conf, reportFuncName, HConstants.NORMAL_QOS, qosFunction, userOnly);
}
示例7: getServices
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
/**
 * @return list of blocking services and their security info classes that this server supports
 */
private List<BlockingServiceAndInterface> getServices() {
  List<BlockingServiceAndInterface> services =
      new ArrayList<BlockingServiceAndInterface>(3);
  // Master administrative RPCs.
  services.add(new BlockingServiceAndInterface(
      MasterProtos.MasterService.newReflectiveBlockingService(this),
      MasterProtos.MasterService.BlockingInterface.class));
  // Region-server status reporting RPCs (startup/report/transition).
  services.add(new BlockingServiceAndInterface(
      RegionServerStatusProtos.RegionServerStatusService.newReflectiveBlockingService(this),
      RegionServerStatusProtos.RegionServerStatusService.BlockingInterface.class));
  return services;
}
示例8: testClusterRequests
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
@Test(timeout = 300000)
public void testClusterRequests() throws Exception {
  // Drive the cluster_requests counter with fabricated region-server reports:
  // it should follow the reported totals, and a duplicate report must leave
  // the counter unchanged.
  RegionServerStatusProtos.RegionServerReportRequest.Builder report =
      RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
  ServerName serverName = cluster.getRegionServer(0).getServerName();
  report.setServer(ProtobufUtil.toServerName(serverName));
  MetricsMasterSource masterSource = master.getMetrics().getMetricsSource();
  masterSource.init();
  // First report: 10000 total requests.
  report.setLoad(ClusterStatusProtos.ServerLoad.newBuilder()
      .setTotalNumberOfRequests(10000)
      .build());
  master.regionServerReport(null, report.build());
  metricsHelper.assertCounter("cluster_requests", 10000, masterSource);
  // Second report raises the total to 15000.
  report.setLoad(ClusterStatusProtos.ServerLoad.newBuilder()
      .setTotalNumberOfRequests(15000)
      .build());
  master.regionServerReport(null, report.build());
  metricsHelper.assertCounter("cluster_requests", 15000, masterSource);
  // Re-sending the identical report must not change the counter.
  master.regionServerReport(null, report.build());
  metricsHelper.assertCounter("cluster_requests", 15000, masterSource);
  master.stopMaster();
}
示例9: testMasterOpsWhileSplitting
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
// Walks a region split phase by phase and checks, after each phase, which rows
// are servable through a cached connection: daughters created but offline,
// daughters opened, then the split finalized. The statement order mirrors the
// split protocol and must not be changed.
// NOTE(review): test(...) presumably probes whether the given row is readable
// via the cached region location -- TODO confirm against the helper.
@Test
public void testMasterOpsWhileSplitting() throws Exception {
  TableName tableName = TableName.valueOf("TestSplit");
  byte[] familyName = Bytes.toBytes("fam");
  // Populate the table so there is data on both sides of the split point.
  try (HTable ht = TEST_UTIL.createTable(tableName, familyName)) {
    TEST_UTIL.loadTable(ht, familyName, false);
  }
  HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  byte[] firstRow = Bytes.toBytes("aaa");
  byte[] splitRow = Bytes.toBytes("lll");  // split point
  byte[] lastRow = Bytes.toBytes("zzz");
  try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) {
    // this will also cache the region
    byte[] regionName = conn.getRegionLocator(tableName).getRegionLocation(splitRow)
        .getRegionInfo().getRegionName();
    Region region = server.getRegion(regionName);
    SplitTransactionImpl split = new SplitTransactionImpl((HRegion) region, splitRow);
    split.prepare();
    // 1. phase I
    PairOfSameType<Region> regions = split.createDaughters(server, server, null);
    assertFalse(test(conn, tableName, firstRow, server));
    assertFalse(test(conn, tableName, lastRow, server));
    // passing null as services prevents final step
    // 2, most of phase II
    split.openDaughters(server, null, regions.getFirst(), regions.getSecond());
    assertFalse(test(conn, tableName, firstRow, server));
    assertFalse(test(conn, tableName, lastRow, server));
    // 3. finish phase II
    // note that this replicates some code from SplitTransaction
    // 2nd daughter first
    if (split.useZKForAssignment) {
      server.postOpenDeployTasks(regions.getSecond());
    } else {
      // Master-driven assignment: report the SPLIT transition directly.
      server.reportRegionStateTransition(
          RegionServerStatusProtos.RegionStateTransition.TransitionCode.SPLIT,
          region.getRegionInfo(), regions.getFirst().getRegionInfo(),
          regions.getSecond().getRegionInfo());
    }
    // Add to online regions
    server.addToOnlineRegions(regions.getSecond());
    // THIS is the crucial point:
    // the 2nd daughter was added, so querying before the split key should fail.
    assertFalse(test(conn, tableName, firstRow, server));
    // past splitkey is ok.
    assertTrue(test(conn, tableName, lastRow, server));
    // Add to online regions
    server.addToOnlineRegions(regions.getFirst());
    assertTrue(test(conn, tableName, firstRow, server));
    assertTrue(test(conn, tableName, lastRow, server));
    if (split.useZKForAssignment) {
      // 4. phase III
      ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
          .getSplitTransactionCoordination().completeSplitTransaction(server, regions.getFirst(),
              regions.getSecond(), split.std, region);
    }
    assertTrue(test(conn, tableName, firstRow, server));
    assertTrue(test(conn, tableName, lastRow, server));
  }
}
示例10: reportRegionStateTransition
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
/**
 * No-op implementation: region state transition reports are ignored.
 *
 * @return always {@code false}, i.e. the transition was not handled
 */
@Override
public boolean reportRegionStateTransition(
    RegionServerStatusProtos.RegionStateTransition.TransitionCode code,
    long l, HRegionInfo... hris) {
  // Stub: deliberately does nothing with the reported transition.
  return false;
}
示例11: testMasterOpsWhileSplitting
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; //导入依赖的package包/类
// Older (pre-Connection API) variant of the phase-by-phase split test: checks
// which rows are servable through a cached HConnection after each phase of a
// region split. The statement order mirrors the split protocol and must not
// be changed.
// NOTE(review): test(...) presumably probes whether the given row is readable
// via the cached region location -- TODO confirm against the helper.
@Test
public void testMasterOpsWhileSplitting() throws Exception {
  TableName tableName =
      TableName.valueOf("TestSplit");
  byte[] familyName = Bytes.toBytes("fam");
  // Populate the table so there is data on both sides of the split point.
  try (HTable ht = TEST_UTIL.createTable(tableName, familyName)) {
    TEST_UTIL.loadTable(ht, familyName, false);
  }
  HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  byte []firstRow = Bytes.toBytes("aaa");
  byte []splitRow = Bytes.toBytes("lll");  // split point
  byte []lastRow = Bytes.toBytes("zzz");
  HConnection con = HConnectionManager
      .getConnection(TEST_UTIL.getConfiguration());
  // this will also cache the region
  byte[] regionName = con.locateRegion(tableName, splitRow).getRegionInfo()
      .getRegionName();
  HRegion region = server.getRegion(regionName);
  SplitTransaction split = new SplitTransaction(region, splitRow);
  // Mirror the cluster's configured assignment mode (ZK vs. master-driven).
  split.useZKForAssignment = ConfigUtil.useZKForAssignment(conf);
  split.prepare();
  // 1. phase I
  PairOfSameType<HRegion> regions = split.createDaughters(server, server);
  assertFalse(test(con, tableName, firstRow, server));
  assertFalse(test(con, tableName, lastRow, server));
  // passing null as services prevents final step
  // 2, most of phase II
  split.openDaughters(server, null, regions.getFirst(), regions.getSecond());
  assertFalse(test(con, tableName, firstRow, server));
  assertFalse(test(con, tableName, lastRow, server));
  // 3. finish phase II
  // note that this replicates some code from SplitTransaction
  // 2nd daughter first
  if (split.useZKForAssignment) {
    server.postOpenDeployTasks(regions.getSecond());
  } else {
    // Master-driven assignment: report the SPLIT transition directly.
    server.reportRegionStateTransition(
        RegionServerStatusProtos.RegionStateTransition.TransitionCode.SPLIT,
        region.getRegionInfo(), regions.getFirst().getRegionInfo(),
        regions.getSecond().getRegionInfo());
  }
  // Add to online regions
  server.addToOnlineRegions(regions.getSecond());
  // THIS is the crucial point:
  // the 2nd daughter was added, so querying before the split key should fail.
  assertFalse(test(con, tableName, firstRow, server));
  // past splitkey is ok.
  assertTrue(test(con, tableName, lastRow, server));
  // first daughter second
  if (split.useZKForAssignment) {
    server.postOpenDeployTasks(regions.getFirst());
  }
  // Add to online regions
  server.addToOnlineRegions(regions.getFirst());
  assertTrue(test(con, tableName, firstRow, server));
  assertTrue(test(con, tableName, lastRow, server));
  if (split.useZKForAssignment) {
    // 4. phase III
    ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
        .getSplitTransactionCoordination().completeSplitTransaction(server, regions.getFirst(),
            regions.getSecond(), split.std, region);
  }
  assertTrue(test(con, tableName, firstRow, server));
  assertTrue(test(con, tableName, lastRow, server));
}