

Java ClusterStatusProtos Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos. If you have been wondering what ClusterStatusProtos is for, how to use it, or what real-world usage looks like, the curated examples below should help.


ClusterStatusProtos belongs to the org.apache.hadoop.hbase.protobuf.generated package. Fifteen code examples of the class are shown below, sorted by popularity by default.
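All of the examples rely on the standard protocol buffer builder pattern exposed by the generated ClusterStatusProtos classes. As a quick orientation, here is a minimal, self-contained sketch; the class name ServerLoadProtoSketch and the request count are illustrative, not taken from any project below:

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;

public class ServerLoadProtoSketch {
  public static void main(String[] args) {
    // Build an otherwise-empty ServerLoad message; 10000 is an arbitrary sample value.
    ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
        .setTotalNumberOfRequests(10000)
        .build();
    System.out.println(sl.getTotalNumberOfRequests()); // prints 10000
  }
}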

Example 1: sourceToString

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
/**
 * sourceToString
 * @return a string containing the sourceReplicationLoad information
 */
public String sourceToString() {
  if (this.sourceMetricsList == null) return null;

  StringBuilder sb = new StringBuilder();

  for (ClusterStatusProtos.ReplicationLoadSource rls : this.replicationLoadSourceList) {

    sb = Strings.appendKeyValue(sb, "\n           PeerID", rls.getPeerID());
    sb = Strings.appendKeyValue(sb, "AgeOfLastShippedOp", rls.getAgeOfLastShippedOp());
    sb = Strings.appendKeyValue(sb, "SizeOfLogQueue", rls.getSizeOfLogQueue());
    sb = Strings.appendKeyValue(sb, "TimeStampsOfLastShippedOp",
        new Date(rls.getTimeStampOfLastShippedOp()).toString());
    sb = Strings.appendKeyValue(sb, "Replication Lag", rls.getReplicationLag());
  }

  return sb.toString();
}
 
Contributor: fengchen8086, Project: ditb, Lines: 23, Source: ReplicationLoad.java
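For context, the ClusterStatusProtos.ReplicationLoadSource message read above can be assembled with the matching builder setters. This is a minimal sketch with placeholder values (the peer id, metrics, and the class name ReplicationLoadSourceSketch are made up, not data from the ditb project):

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;

public class ReplicationLoadSourceSketch {
  public static void main(String[] args) {
    // All five fields are set here because sourceToString() reads each of them.
    ClusterStatusProtos.ReplicationLoadSource rls =
        ClusterStatusProtos.ReplicationLoadSource.newBuilder()
            .setPeerID("peer_1")                                  // placeholder peer id
            .setAgeOfLastShippedOp(0)
            .setSizeOfLogQueue(1)
            .setTimeStampOfLastShippedOp(System.currentTimeMillis())
            .setReplicationLag(0)
            .build();
    System.out.println(rls.getPeerID());
  }
}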

Example 2: createServerLoadProto

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  HBaseProtos.RegionSpecifier rSpecOne =
      HBaseProtos.RegionSpecifier.newBuilder()
          .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
          .setValue(ByteString.copyFromUtf8("ASDFGQWERT")).build();
  HBaseProtos.RegionSpecifier rSpecTwo =
      HBaseProtos.RegionSpecifier.newBuilder()
          .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
          .setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();

  ClusterStatusProtos.RegionLoad rlOne =
      ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
          .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
          .setStorefileIndexSizeMB(42).setRootIndexSizeKB(201)
          .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
          .build();
  ClusterStatusProtos.RegionLoad rlTwo =
      ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
          .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
          .setStorefileIndexSizeMB(40).setRootIndexSizeKB(303)
          .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
          .build();

  ClusterStatusProtos.ServerLoad sl =
      ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne).
        addRegionLoads(rlTwo).build();
  return sl;
}
 
Contributor: fengchen8086, Project: ditb, Lines: 25, Source: TestServerLoad.java

Example 3: ServerLoad

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
  this.serverLoad = serverLoad;
  for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) {
    stores += rl.getStores();
    storefiles += rl.getStorefiles();
    storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB();
    storefileSizeMB += rl.getStorefileSizeMB();
    memstoreSizeMB += rl.getMemstoreSizeMB();
    storefileIndexSizeMB += rl.getStorefileIndexSizeMB();
    readRequestsCount += rl.getReadRequestsCount();
    writeRequestsCount += rl.getWriteRequestsCount();
    rootIndexSizeKB += rl.getRootIndexSizeKB();
    totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB();
    totalStaticBloomSizeKB += rl.getTotalStaticBloomSizeKB();
    totalCompactingKVs += rl.getTotalCompactingKVs();
    currentCompactedKVs += rl.getCurrentCompactedKVs();
  }

}
 
Contributor: fengchen8086, Project: ditb, Lines: 20, Source: ServerLoad.java
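Examples 2 and 3 fit together: the protobuf ServerLoad built in Example 2 is exactly what the org.apache.hadoop.hbase.ServerLoad wrapper aggregates over. A minimal sketch of that hand-off follows; the class name ServerLoadWrapperSketch is hypothetical, and the accessor names follow the HBase 1.x ServerLoad API, so treat them as assumptions if your version differs:

import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;

public class ServerLoadWrapperSketch {
  // Assumes a proto built as in Example 2's createServerLoadProto() is passed in.
  static void printAggregates(ClusterStatusProtos.ServerLoad proto) {
    ServerLoad sl = new ServerLoad(proto);
    // The constructor sums the per-region values, so these reflect both regions.
    System.out.println("stores: " + sl.getStores());         // 10 + 3 = 13
    System.out.println("storefiles: " + sl.getStorefiles()); // 101 + 13 = 114
    System.out.println("readRequests: " + sl.getReadRequestsCount()); // sum of both regions
  }
}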

Example 4: regionSequenceIdsToByteArray

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
/**
 * @param regionLastFlushedSequenceId the flushed sequence id of a region which is the min of its
 *          store max seq ids
 * @param storeSequenceIds column family to sequence Id map
 * @return Serialized protobuf of <code>RegionSequenceIds</code> with pb magic prefix prepended
 *         suitable for use to filter wal edits in distributedLogReplay mode
 */
public static byte[] regionSequenceIdsToByteArray(final Long regionLastFlushedSequenceId,
    final Map<byte[], Long> storeSequenceIds) {
  ClusterStatusProtos.RegionStoreSequenceIds.Builder regionSequenceIdsBuilder =
      ClusterStatusProtos.RegionStoreSequenceIds.newBuilder();
  ClusterStatusProtos.StoreSequenceId.Builder storeSequenceIdBuilder =
      ClusterStatusProtos.StoreSequenceId.newBuilder();
  if (storeSequenceIds != null) {
    for (Map.Entry<byte[], Long> e : storeSequenceIds.entrySet()){
      byte[] columnFamilyName = e.getKey();
      Long curSeqId = e.getValue();
      storeSequenceIdBuilder.setFamilyName(ByteStringer.wrap(columnFamilyName));
      storeSequenceIdBuilder.setSequenceId(curSeqId);
      regionSequenceIdsBuilder.addStoreSequenceId(storeSequenceIdBuilder.build());
      storeSequenceIdBuilder.clear();
    }
  }
  regionSequenceIdsBuilder.setLastFlushedSequenceId(regionLastFlushedSequenceId);
  byte[] result = regionSequenceIdsBuilder.build().toByteArray();
  return ProtobufUtil.prependPBMagic(result);
}
 
Contributor: fengchen8086, Project: ditb, Lines: 28, Source: ZKUtil.java

Example 5: parseRegionStoreSequenceIds

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
/**
 * @param bytes Content of serialized data of RegionStoreSequenceIds
 * @return a RegionStoreSequenceIds object
 * @throws DeserializationException
 */
public static RegionStoreSequenceIds parseRegionStoreSequenceIds(final byte[] bytes)
    throws DeserializationException {
  if (bytes == null || !ProtobufUtil.isPBMagicPrefix(bytes)) {
    throw new DeserializationException("Unable to parse RegionStoreSequenceIds.");
  }
  RegionStoreSequenceIds.Builder regionSequenceIdsBuilder =
      ClusterStatusProtos.RegionStoreSequenceIds.newBuilder();
  int pblen = ProtobufUtil.lengthOfPBMagic();
  RegionStoreSequenceIds storeIds = null;
  try {
    ProtobufUtil.mergeFrom(regionSequenceIdsBuilder, bytes, pblen, bytes.length - pblen);
    storeIds = regionSequenceIdsBuilder.build();
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return storeIds;
}
 
Contributor: fengchen8086, Project: ditb, Lines: 23, Source: ZKUtil.java
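Examples 4 and 5 are the two halves of a round trip. A minimal sketch of serializing per-store sequence ids and parsing them back, assuming the ZKUtil helpers are public static as shown above; the column-family names, ids, and the class name RegionSequenceIdsRoundTripSketch are placeholders:

import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;

public class RegionSequenceIdsRoundTripSketch {
  public static void main(String[] args) throws DeserializationException {
    // Placeholder per-store (column family) sequence ids.
    Map<byte[], Long> storeSequenceIds = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
    storeSequenceIds.put(Bytes.toBytes("cf1"), 100L);
    storeSequenceIds.put(Bytes.toBytes("cf2"), 150L);

    // Serialize with the pb magic prefix prepended (Example 4) ...
    byte[] bytes = ZKUtil.regionSequenceIdsToByteArray(100L, storeSequenceIds);
    // ... and parse it back into the protobuf message (Example 5).
    RegionStoreSequenceIds ids = ZKUtil.parseRegionStoreSequenceIds(bytes);
    System.out.println(ids.getLastFlushedSequenceId()); // 100
    System.out.println(ids.getStoreSequenceIdCount());  // 2
  }
}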

Example 6: createServerLoadProto

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  HBaseProtos.RegionSpecifier rSpecOne =
      HBaseProtos.RegionSpecifier.newBuilder()
          .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
          .setValue(ByteString.copyFromUtf8("ASDFGQWERT")).build();
  HBaseProtos.RegionSpecifier rSpecTwo =
      HBaseProtos.RegionSpecifier.newBuilder()
          .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
          .setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();

  ClusterStatusProtos.RegionLoad rlOne =
      ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
          .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
          .setStorefileIndexSizeMB(42).setRootIndexSizeKB(201).build();
  ClusterStatusProtos.RegionLoad rlTwo =
      ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
          .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
          .setStorefileIndexSizeMB(40).setRootIndexSizeKB(303).build();

  ClusterStatusProtos.ServerLoad sl =
      ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne).
        addRegionLoads(rlTwo).build();
  return sl;
}
 
Contributor: grokcoder, Project: pbase, Lines: 25, Source: TestServerLoad.java

Example 7: regionServerReport

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
@Override
public RegionServerReportResponse regionServerReport(
    RpcController controller, RegionServerReportRequest request) throws ServiceException {
  try {
    ClusterStatusProtos.ServerLoad sl = request.getLoad();
    ServerName serverName = ProtobufUtil.toServerName(request.getServer());
    ServerLoad oldLoad = serverManager.getLoad(serverName);
    this.serverManager.regionServerReport(serverName, new ServerLoad(sl));
    if (sl != null && this.metricsMaster != null) {
      // Up our metrics.
      this.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests()
        - (oldLoad != null ? oldLoad.getTotalNumberOfRequests() : 0));
    }
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }

  return RegionServerReportResponse.newBuilder().build();
}
 
Contributor: tenggyut, Project: HIndex, Lines: 20, Source: HMaster.java

Example 8: ServerLoad

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
  this.serverLoad = serverLoad;
  for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) {
    stores += rl.getStores();
    storefiles += rl.getStorefiles();
    storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB();
    storefileSizeMB += rl.getStorefileSizeMB();
    memstoreSizeMB += rl.getMemstoreSizeMB();
    storefileIndexSizeMB += rl.getStorefileIndexSizeMB();
    readRequestsCount += rl.getReadRequestsCount();
    writeRequestsCount += rl.getWriteRequestsCount();
    rootIndexSizeKB += rl.getRootIndexSizeKB();
    totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB();
    totalStaticBloomSizeKB += rl.getTotalStaticBloomSizeKB();
    totalCompactingKVs += rl.getTotalCompactingKVs();
    currentCompactedKVs += rl.getCurrentCompactedKVs();
  }
  
}
 
Contributor: tenggyut, Project: HIndex, Lines: 20, Source: ServerLoad.java

Example 9: regionServerReport

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
@Override
public RegionServerReportResponse regionServerReport(
    RpcController controller, RegionServerReportRequest request) throws ServiceException {
  try {
    master.checkServiceStarted();
    ClusterStatusProtos.ServerLoad sl = request.getLoad();
    ServerName serverName = ProtobufUtil.toServerName(request.getServer());
    ServerLoad oldLoad = master.serverManager.getLoad(serverName);
    master.serverManager.regionServerReport(serverName, new ServerLoad(sl));
    if (sl != null && master.metricsMaster != null) {
      // Up our metrics.
      master.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests()
        - (oldLoad != null ? oldLoad.getTotalNumberOfRequests() : 0));
    }
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }
  return RegionServerReportResponse.newBuilder().build();
}
 
Contributor: shenli-uiuc, Project: PyroDB, Lines: 20, Source: MasterRpcServices.java

Example 10: regionServerReport

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
@Override
public RegionServerReportResponse regionServerReport(
    RpcController controller, RegionServerReportRequest request) throws ServiceException {
  try {
    ClusterStatusProtos.ServerLoad sl = request.getLoad();
    this.serverManager.regionServerReport(ProtobufUtil.toServerName(request.getServer()), new ServerLoad(sl));
    if (sl != null && this.metricsMaster != null) {
      // Up our metrics.
      this.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests());
    }
  } catch (IOException ioe) {
    throw new ServiceException(ioe);
  }

  return RegionServerReportResponse.newBuilder().build();
}
 
Contributor: cloud-software-foundation, Project: c5, Lines: 17, Source: HMaster.java

Example 11: testClusterRequests

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
@Test(timeout = 300000)
public void testClusterRequests() throws Exception {

  // sending fake request to master to see how metric value has changed
  RegionServerStatusProtos.RegionServerReportRequest.Builder request =
      RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
  HRegionServer rs = cluster.getRegionServer(0);
  request.setServer(ProtobufUtil.toServerName(rs.getServerName()));

  ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
                                         .setTotalNumberOfRequests(10000)
                                         .build();
  master.getMetrics().getMetricsSource().init();
  request.setLoad(sl);
  master.regionServerReport(null, request.build());

  metricsHelper.assertCounter("cluster_requests", 10000,
      master.getMetrics().getMetricsSource());
  master.stopMaster();
}
 
Contributor: cloud-software-foundation, Project: c5, Lines: 21, Source: TestMasterMetrics.java

Example 12: tryRegionServerReport

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
@VisibleForTesting protected void tryRegionServerReport(long reportStartTime, long reportEndTime)
    throws IOException {
  RegionServerStatusService.BlockingInterface rss = rssStub;
  if (rss == null) {
    // the current server could be stopping.
    return;
  }
  ClusterStatusProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
  try {
    RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
    ServerName sn = ServerName.parseVersionedServerName(this.serverName.getVersionedBytes());
    request.setServer(ProtobufUtil.toServerName(sn));
    request.setLoad(sl);
    rss.regionServerReport(null, request.build());
  } catch (ServiceException se) {
    IOException ioe = ProtobufUtil.getRemoteException(se);
    if (ioe instanceof YouAreDeadException) {
      // This will be caught and handled as a fatal error in run()
      throw ioe;
    }
    if (rssStub == rss) {
      rssStub = null;
    }
    // Couldn't connect to the master, get location from zk and reconnect
    // Method blocks until new master is found or we are stopped
    createRegionServerStatusStub();
  }
}
 
Contributor: fengchen8086, Project: ditb, Lines: 29, Source: HRegionServer.java

Example 13: testClusterRequests

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
@Test(timeout = 300000)
public void testClusterRequests() throws Exception {

  // sending fake request to master to see how metric value has changed
  RegionServerStatusProtos.RegionServerReportRequest.Builder request =
      RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
  ServerName serverName = cluster.getMaster(0).getServerName();
  request.setServer(ProtobufUtil.toServerName(serverName));

  MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
  ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
                                         .setTotalNumberOfRequests(10000)
                                         .build();
  masterSource.init();
  request.setLoad(sl);
  master.getMasterRpcServices().regionServerReport(null, request.build());

  metricsHelper.assertCounter("cluster_requests", 10000, masterSource);

  sl = ClusterStatusProtos.ServerLoad.newBuilder()
      .setTotalNumberOfRequests(15000)
      .build();
  request.setLoad(sl);
  master.getMasterRpcServices().regionServerReport(null, request.build());

  metricsHelper.assertCounter("cluster_requests", 15000, masterSource);

  master.getMasterRpcServices().regionServerReport(null, request.build());

  metricsHelper.assertCounter("cluster_requests", 15000, masterSource);
  master.stopMaster();
}
 
Contributor: fengchen8086, Project: ditb, Lines: 33, Source: TestMasterMetrics.java

Example 14: test

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
@Test
public void test() {
  RegionState state1 = new RegionState(
          new HRegionInfo(TableName.valueOf("table")), RegionState.State.OPENING);
  ClusterStatusProtos.RegionState protobuf1 = state1.convert();
  RegionState state2 = RegionState.convert(protobuf1);
  ClusterStatusProtos.RegionState protobuf2 = state1.convert();

  assertEquals(state1, state2);
  assertEquals(protobuf1, protobuf2);
}
 
Contributor: fengchen8086, Project: ditb, Lines: 12, Source: TestRegionState.java

Example 15: getRegionsLoad

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the required package/class
/**
 * @return region load metrics
 */
public Map<byte[], RegionLoad> getRegionsLoad() {
  Map<byte[], RegionLoad> regionLoads =
    new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
  for (ClusterStatusProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) {
    RegionLoad regionLoad = new RegionLoad(rl);
    regionLoads.put(regionLoad.getName(), regionLoad);
  }
  return regionLoads;
}
 
Contributor: fengchen8086, Project: ditb, Lines: 13, Source: ServerLoad.java
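In practice, the ServerLoad backing getRegionsLoad() usually comes from the cluster status reported to the master. The following is a hedged sketch of reading per-region load through the client side; it assumes an already-configured Connection, and the class name RegionLoadSketch plus the Admin/ClusterStatus accessors follow the HBase 1.x client API, so treat them as assumptions if your version differs:

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLoadSketch {
  static void printRegionLoads(Connection connection) throws IOException {
    try (Admin admin = connection.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus();
      for (ServerName server : status.getServers()) {
        ServerLoad load = status.getLoad(server); // wraps a ClusterStatusProtos.ServerLoad
        for (Map.Entry<byte[], RegionLoad> e : load.getRegionsLoad().entrySet()) {
          System.out.println(Bytes.toStringBinary(e.getKey())
              + " readRequests=" + e.getValue().getReadRequestsCount());
        }
      }
    }
  }
}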


Note: The org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and distribution or use should follow each project's License. Do not repost without permission.