This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad. If you are wondering what ClusterStatusProtos.RegionLoad does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.
The following shows 7 code examples of the ClusterStatusProtos.RegionLoad method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: createServerLoadProto
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the package/class this method depends on
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  HBaseProtos.RegionSpecifier rSpecOne =
      HBaseProtos.RegionSpecifier.newBuilder()
          .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
          .setValue(ByteString.copyFromUtf8("ASDFGQWERT")).build();
  HBaseProtos.RegionSpecifier rSpecTwo =
      HBaseProtos.RegionSpecifier.newBuilder()
          .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
          .setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();
  ClusterStatusProtos.RegionLoad rlOne =
      ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
          .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
          .setStorefileIndexSizeMB(42).setRootIndexSizeKB(201)
          .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
          .build();
  ClusterStatusProtos.RegionLoad rlTwo =
      ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
          .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
          .setStorefileIndexSizeMB(40).setRootIndexSizeKB(303)
          .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
          .build();
  ClusterStatusProtos.ServerLoad sl =
      ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne)
          .addRegionLoads(rlTwo).build();
  return sl;
}
Example 2: ServerLoad
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the package/class this method depends on
public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
  this.serverLoad = serverLoad;
  for (ClusterStatusProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) {
    stores += rl.getStores();
    storefiles += rl.getStorefiles();
    storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB();
    storefileSizeMB += rl.getStorefileSizeMB();
    memstoreSizeMB += rl.getMemstoreSizeMB();
    storefileIndexSizeMB += rl.getStorefileIndexSizeMB();
    readRequestsCount += rl.getReadRequestsCount();
    writeRequestsCount += rl.getWriteRequestsCount();
    rootIndexSizeKB += rl.getRootIndexSizeKB();
    totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB();
    totalStaticBloomSizeKB += rl.getTotalStaticBloomSizeKB();
    totalCompactingKVs += rl.getTotalCompactingKVs();
    currentCompactedKVs += rl.getCurrentCompactedKVs();
  }
}
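Taken together, Examples 1 and 2 suggest a natural way to exercise the wrapper in a unit test. The snippet below is only an illustrative sketch: it assumes the HBase 1.x org.apache.hadoop.hbase.ServerLoad wrapper with getter names (getStores(), getStorefiles()) that mirror the fields summed in the constructor above, plus a statically imported JUnit assertEquals.
ClusterStatusProtos.ServerLoad proto = createServerLoadProto();  // the proto from Example 1
ServerLoad sl = new ServerLoad(proto);                           // the constructor from Example 2
// rlOne contributed 10 stores / 101 storefiles, rlTwo contributed 3 / 13
assertEquals(13, sl.getStores());
assertEquals(114, sl.getStorefiles());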
Example 3: createServerLoadProto
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the package/class this method depends on
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  HBaseProtos.RegionSpecifier rSpecOne =
      HBaseProtos.RegionSpecifier.newBuilder()
          .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
          .setValue(ByteString.copyFromUtf8("ASDFGQWERT")).build();
  HBaseProtos.RegionSpecifier rSpecTwo =
      HBaseProtos.RegionSpecifier.newBuilder()
          .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
          .setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();
  ClusterStatusProtos.RegionLoad rlOne =
      ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
          .setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
          .setStorefileIndexSizeMB(42).setRootIndexSizeKB(201).build();
  ClusterStatusProtos.RegionLoad rlTwo =
      ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
          .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
          .setStorefileIndexSizeMB(40).setRootIndexSizeKB(303).build();
  ClusterStatusProtos.ServerLoad sl =
      ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne)
          .addRegionLoads(rlTwo).build();
  return sl;
}
Example 4: ServerLoad
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the package/class this method depends on
public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
  this.serverLoad = serverLoad;
  for (ClusterStatusProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) {
    stores += rl.getStores();
    storefiles += rl.getStorefiles();
    storeUncompressedSizeMB += rl.getStoreUncompressedSizeMB();
    storefileSizeMB += rl.getStorefileSizeMB();
    memstoreSizeMB += rl.getMemstoreSizeMB();
    storefileIndexSizeMB += rl.getStorefileIndexSizeMB();
    readRequestsCount += rl.getReadRequestsCount();
    writeRequestsCount += rl.getWriteRequestsCount();
    rootIndexSizeKB += rl.getRootIndexSizeKB();
    totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB();
    totalStaticBloomSizeKB += rl.getTotalStaticBloomSizeKB();
    totalCompactingKVs += rl.getTotalCompactingKVs();
    currentCompactedKVs += rl.getCurrentCompactedKVs();
  }
}
Example 5: getRegionsLoad
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the package/class this method depends on
/**
 * @return region load metrics
 */
public Map<byte[], RegionLoad> getRegionsLoad() {
  Map<byte[], RegionLoad> regionLoads =
      new TreeMap<byte[], RegionLoad>(Bytes.BYTES_COMPARATOR);
  for (ClusterStatusProtos.RegionLoad rl : serverLoad.getRegionLoadsList()) {
    RegionLoad regionLoad = new RegionLoad(rl);
    regionLoads.put(regionLoad.getName(), regionLoad);
  }
  return regionLoads;
}
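A hypothetical way to consume the returned map, assuming a ServerLoad wrapper built as in the earlier examples and the getStorefiles() accessor on the RegionLoad wrapper:
ServerLoad sl = new ServerLoad(createServerLoadProto());
for (Map.Entry<byte[], RegionLoad> entry : sl.getRegionsLoad().entrySet()) {
  // the key is the region name in bytes; Bytes.toString renders it for logging
  System.out.println(Bytes.toString(entry.getKey()) + " storefiles=" + entry.getValue().getStorefiles());
}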
Example 6: RegionLoad
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the package/class this method depends on
public RegionLoad(ClusterStatusProtos.RegionLoad regionLoadPB) {
  this.regionLoadPB = regionLoadPB;
}
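Since every example above sets a region_specifier on the RegionLoad message before calling build(), a minimal sketch for building a message and handing it to this wrapper could look like the following; the values here are purely illustrative.
HBaseProtos.RegionSpecifier spec = HBaseProtos.RegionSpecifier.newBuilder()
    .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
    .setValue(ByteString.copyFromUtf8("abcdef1234567890"))
    .build();
ClusterStatusProtos.RegionLoad regionLoadPB = ClusterStatusProtos.RegionLoad.newBuilder()
    .setRegionSpecifier(spec)   // identifies which region these metrics describe
    .setStores(1)
    .setStorefiles(2)
    .setStorefileSizeMB(128)
    .build();
RegionLoad regionLoad = new RegionLoad(regionLoadPB);  // the wrapper simply stores the message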
Example 7: getCostWhenNoCachedRegionLoadsFound
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; // import the package/class this method depends on
public static Map<String, PartitionLoad> getCostWhenNoCachedRegionLoadsFound(String tableName) {
  try (Partition p = SIDriver.driver().getTableFactory().getTable(tableName)) {
    Map<byte[], Pair<String, Long>> ret = ((ClientPartition) p).coprocessorExec(
        SpliceMessage.SpliceDerbyCoprocessorService.class,
        new Batch.Call<SpliceMessage.SpliceDerbyCoprocessorService, Pair<String, Long>>() {
          @Override
          public Pair<String, Long> call(SpliceMessage.SpliceDerbyCoprocessorService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            SpliceMessage.SpliceRegionSizeRequest message = SpliceMessage.SpliceRegionSizeRequest.newBuilder().build();
            BlockingRpcCallback<SpliceMessage.SpliceRegionSizeResponse> rpcCallback = new BlockingRpcCallback<>();
            instance.computeRegionSize(controller, message, rpcCallback);
            if (controller.failed()) {
              Throwable t = Throwables.getRootCause(controller.getFailedOn());
              if (t instanceof IOException) throw (IOException) t;
              else throw new IOException(t);
            }
            SpliceMessage.SpliceRegionSizeResponse response = rpcCallback.get();
            return Pair.newPair(response.getEncodedName(), response.getSizeInBytes());
          }
        });
    Collection<Pair<String, Long>> collection = ret.values();
    long factor = 1024 * 1024;
    Map<String, PartitionLoad> retMap = new HashMap<>();
    for (Pair<String, Long> info : collection) {
      long sizeMB = info.getSecond() / factor;
      ClusterStatusProtos.RegionLoad.Builder rl = ClusterStatusProtos.RegionLoad.newBuilder();
      rl.setMemstoreSizeMB((int) (sizeMB / 2));
      rl.setStorefileSizeMB((int) (sizeMB / 2));
      rl.setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder()
          .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
          .setValue(ZeroCopyLiteralByteString.copyFromUtf8(info.getFirst())).build());
      ClusterStatusProtos.RegionLoad load = rl.build();
      HPartitionLoad value = new HPartitionLoad(info.getFirst(), load.getStorefileSizeMB(),
          load.getMemstoreSizeMB(), load.getStorefileIndexSizeMB());
      retMap.put(info.getFirst(), value);
    }
    return retMap;
  } catch (Throwable th) {
    SpliceLogUtils.error(LOG, "Unable to fetch region load info", th);
  }
  /*
   * When we fail for whatever reason, we don't want to blow up the query, we just return no
   * cached information. This will screw up the planning phase (since there is nothing to work with), but
   * at least it won't explode.
   */
  return Collections.emptyMap();
}