This article collects typical usage examples of the Java method org.apache.hadoop.hbase.ClusterStatus.getLoad. If you have been wondering what ClusterStatus.getLoad does, how to call it, or what real-world usage looks like, the curated code samples below should help. You can also read further about the enclosing class, org.apache.hadoop.hbase.ClusterStatus.
The following 13 code examples of ClusterStatus.getLoad are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
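Before the examples, here is a minimal sketch of the recurring pattern they all share: obtain a ClusterStatus from Admin, enumerate the live servers, and look up each server's load with getLoad. This sketch is illustrative only and assumes an HBase 1.x-style client API (where getLoad returns ServerLoad); the configuration and connection setup are placeholders, not part of the examples below.

import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// A minimal, hedged sketch (not taken from the examples below).
try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
     Admin admin = conn.getAdmin()) {
  ClusterStatus status = admin.getClusterStatus();
  for (ServerName server : status.getServers()) {
    ServerLoad load = status.getLoad(server); // per-server load snapshot
    System.out.println(server.getServerName() + ": "
        + load.getRegionsLoad().size() + " regions, "
        + load.getNumberOfRequests() + " requests");
  }
}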
Example 1: unbalanceRegions
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
protected void unbalanceRegions(ClusterStatus clusterStatus,
    List<ServerName> fromServers, List<ServerName> toServers,
    double fractionOfRegions) throws Exception {
  List<byte[]> victimRegions = new LinkedList<byte[]>();
  for (ServerName server : fromServers) {
    ServerLoad serverLoad = clusterStatus.getLoad(server);
    // Ugh.
    List<byte[]> regions = new LinkedList<byte[]>(serverLoad.getRegionsLoad().keySet());
    int victimRegionCount = (int)Math.ceil(fractionOfRegions * regions.size());
    LOG.debug("Removing " + victimRegionCount + " regions from " + server.getServerName());
    for (int i = 0; i < victimRegionCount; ++i) {
      int victimIx = RandomUtils.nextInt(regions.size());
      String regionId = HRegionInfo.encodeRegionName(regions.remove(victimIx));
      victimRegions.add(Bytes.toBytes(regionId));
    }
  }
  LOG.info("Moving " + victimRegions.size() + " regions from " + fromServers.size()
      + " servers to " + toServers.size() + " different servers");
  HBaseAdmin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
  for (byte[] victimRegion : victimRegions) {
    int targetIx = RandomUtils.nextInt(toServers.size());
    admin.move(victimRegion, Bytes.toBytes(toServers.get(targetIx).getServerName()));
  }
}
Example 2: unbalanceRegions
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
protected void unbalanceRegions(ClusterStatus clusterStatus,
    List<ServerName> fromServers, List<ServerName> toServers,
    double fractionOfRegions) throws Exception {
  List<byte[]> victimRegions = new LinkedList<byte[]>();
  for (ServerName server : fromServers) {
    ServerLoad serverLoad = clusterStatus.getLoad(server);
    // Ugh.
    List<byte[]> regions = new LinkedList<byte[]>(serverLoad.getRegionsLoad().keySet());
    int victimRegionCount = (int)Math.ceil(fractionOfRegions * regions.size());
    LOG.debug("Removing " + victimRegionCount + " regions from " + server.getServerName());
    for (int i = 0; i < victimRegionCount; ++i) {
      int victimIx = random.nextInt(regions.size());
      String regionId = HRegionInfo.encodeRegionName(regions.remove(victimIx));
      victimRegions.add(Bytes.toBytes(regionId));
    }
  }
  LOG.info("Moving " + victimRegions.size() + " regions from " + fromServers.size()
      + " servers to " + toServers.size() + " different servers");
  HBaseAdmin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
  for (byte[] victimRegion : victimRegions) {
    int targetIx = random.nextInt(toServers.size());
    admin.move(victimRegion, Bytes.toBytes(toServers.get(targetIx).getServerName()));
  }
}
Example 3: init
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
private void init(RegionLocator regionLocator, Admin admin)
    throws IOException {
  if (!enabled(admin.getConfiguration())) {
    LOG.info("Region size calculation disabled.");
    return;
  }
  LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");
  // get regions for table
  List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
  Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  for (HRegionLocation regionInfo : tableRegionInfos) {
    tableRegions.add(regionInfo.getRegionInfo().getRegionName());
  }
  ClusterStatus clusterStatus = admin.getClusterStatus();
  Collection<ServerName> servers = clusterStatus.getServers();
  final long megaByte = 1024L * 1024L;
  // iterate all cluster regions, filter regions from our table and compute their size
  for (ServerName serverName : servers) {
    ServerLoad serverLoad = clusterStatus.getLoad(serverName);
    for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
      byte[] regionId = regionLoad.getName();
      if (tableRegions.contains(regionId)) {
        long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
        sizeMap.put(regionId, regionSizeBytes);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
        }
      }
    }
  }
  LOG.debug("Region sizes calculated");
}
Example 4: unbalanceRegions
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
protected void unbalanceRegions(ClusterStatus clusterStatus,
    List<ServerName> fromServers, List<ServerName> toServers,
    double fractionOfRegions) throws Exception {
  List<byte[]> victimRegions = new LinkedList<byte[]>();
  for (ServerName server : fromServers) {
    ServerLoad serverLoad = clusterStatus.getLoad(server);
    // Ugh.
    List<byte[]> regions = new LinkedList<byte[]>(serverLoad.getRegionsLoad().keySet());
    int victimRegionCount = (int)Math.ceil(fractionOfRegions * regions.size());
    LOG.debug("Removing " + victimRegionCount + " regions from " + server.getServerName());
    for (int i = 0; i < victimRegionCount; ++i) {
      int victimIx = RandomUtils.nextInt(regions.size());
      String regionId = HRegionInfo.encodeRegionName(regions.remove(victimIx));
      victimRegions.add(Bytes.toBytes(regionId));
    }
  }
  LOG.info("Moving " + victimRegions.size() + " regions from " + fromServers.size()
      + " servers to " + toServers.size() + " different servers");
  Admin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
  for (byte[] victimRegion : victimRegions) {
    // Don't keep moving regions if we're
    // trying to stop the monkey.
    if (context.isStopping()) {
      break;
    }
    int targetIx = RandomUtils.nextInt(toServers.size());
    admin.move(victimRegion, Bytes.toBytes(toServers.get(targetIx).getServerName()));
  }
}
Example 5: testReplicationStatus
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/**
 * Test for HBASE-9531.
 * Puts a few rows into htable1, which should be replicated to htable2.
 * Creates a ClusterStatus instance 'status' from HBaseAdmin, then tests
 * status.getLoad(server).getReplicationLoadSourceList() and
 * status.getLoad(server).getReplicationLoadSink().
 * @throws Exception
 */
@Test(timeout = 300000)
public void testReplicationStatus() throws Exception {
  LOG.info("testReplicationStatus");
  try (Admin admin = utility1.getConnection().getAdmin()) {
    final byte[] qualName = Bytes.toBytes("q");
    Put p;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      p = new Put(Bytes.toBytes("row" + i));
      p.add(famName, qualName, Bytes.toBytes("val" + i));
      htable1.put(p);
    }
    ClusterStatus status = admin.getClusterStatus();
    for (ServerName server : status.getServers()) {
      ServerLoad sl = status.getLoad(server);
      List<ReplicationLoadSource> rLoadSourceList = sl.getReplicationLoadSourceList();
      ReplicationLoadSink rLoadSink = sl.getReplicationLoadSink();
      // check that the SourceList has at least one entry
      assertTrue("failed to get ReplicationLoadSourceList", (rLoadSourceList.size() > 0));
      // only check that the Sink values exist, as it is difficult to verify them on the fly
      assertTrue("failed to get ReplicationLoadSink.AgeOfLastShippedOp ",
          (rLoadSink.getAgeOfLastAppliedOp() >= 0));
      assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp ",
          (rLoadSink.getTimeStampsOfLastAppliedOp() >= 0));
    }
  }
}
Example 6: RegionLoadAdapter
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
public RegionLoadAdapter(HBaseAdmin admin, Map<byte[], HRegionInfo> regionMap, Args args) throws IOException {
  long timestamp = System.currentTimeMillis();
  ClusterStatus clusterStatus = admin.getClusterStatus();
  Collection<ServerName> serverNames = clusterStatus.getServers();
  for (ServerName serverName : serverNames) {
    HServerLoad serverLoad = clusterStatus.getLoad(serverName);
    for (Map.Entry<byte[], HServerLoad.RegionLoad> entry : serverLoad.getRegionsLoad().entrySet()) {
      if (regionMap.get(entry.getKey()) != null)
        regionLoadMap.put(regionMap.get(entry.getKey()), new RegionLoadDelegator(entry.getValue()));
    }
  }
  Util.printVerboseMessage(args, "RegionLoadAdapter", timestamp);
}
Example 7: init
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
private void init(RegionLocator regionLocator, Admin admin)
    throws IOException {
  if (!enabled(admin.getConfiguration())) {
    LOG.info("Region size calculation disabled.");
    return;
  }
  LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");
  // get regions for table
  List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
  Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  for (HRegionLocation regionInfo : tableRegionInfos) {
    tableRegions.add(regionInfo.getRegionInfo().getRegionName());
  }
  ClusterStatus clusterStatus = admin.getClusterStatus();
  Collection<ServerName> servers = clusterStatus.getServers();
  final long megaByte = 1024L * 1024L;
  // iterate all cluster regions, filter regions from our table and compute their size
  for (ServerName serverName : servers) {
    ServerLoad serverLoad = clusterStatus.getLoad(serverName);
    for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
      byte[] regionId = regionLoad.getName();
      if (tableRegions.contains(regionId)) {
        long regionSizeBytes = (regionLoad.getStorefileSizeMB() + regionLoad.getMemStoreSizeMB()) * megaByte;
        sizeMap.put(regionId, regionSizeBytes);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
        }
      }
    }
  }
  LOG.debug("Region sizes calculated");
}
Example 8: get
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
@GET
@Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
public Response get(final @Context UriInfo uriInfo) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("GET " + uriInfo.getAbsolutePath());
  }
  servlet.getMetrics().incrementRequests(1);
  try {
    HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
    ClusterStatus status = admin.getClusterStatus();
    StorageClusterStatusModel model = new StorageClusterStatusModel();
    model.setRegions(status.getRegionsCount());
    model.setRequests(status.getRequestsCount());
    model.setAverageLoad(status.getAverageLoad());
    for (ServerName info : status.getServers()) {
      HServerLoad load = status.getLoad(info);
      StorageClusterStatusModel.Node node =
        model.addLiveNode(
          info.getHostname() + ":" +
          Integer.toString(info.getPort()),
          info.getStartcode(), load.getUsedHeapMB(),
          load.getMaxHeapMB());
      node.setRequests(load.getNumberOfRequests());
      for (HServerLoad.RegionLoad region : load.getRegionsLoad().values()) {
        node.addRegion(region.getName(), region.getStores(),
          region.getStorefiles(), region.getStorefileSizeMB(),
          region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB());
      }
    }
    for (ServerName name : status.getDeadServerNames()) {
      model.addDeadNode(name.toString());
    }
    ResponseBuilder response = Response.ok(model);
    response.cacheControl(cacheControl);
    return response.build();
  } catch (IOException e) {
    throw new WebApplicationException(e,
      Response.Status.SERVICE_UNAVAILABLE);
  }
}
Example 9: get
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
@GET
@Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
  MIMETYPE_PROTOBUF_IETF})
public Response get(final @Context UriInfo uriInfo) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("GET " + uriInfo.getAbsolutePath());
  }
  servlet.getMetrics().incrementRequests(1);
  try {
    ClusterStatus status = servlet.getAdmin().getClusterStatus();
    StorageClusterStatusModel model = new StorageClusterStatusModel();
    model.setRegions(status.getRegionsCount());
    model.setRequests(status.getRequestsCount());
    model.setAverageLoad(status.getAverageLoad());
    for (ServerName info : status.getServers()) {
      ServerLoad load = status.getLoad(info);
      StorageClusterStatusModel.Node node =
        model.addLiveNode(
          info.getHostname() + ":" +
          Integer.toString(info.getPort()),
          info.getStartcode(), load.getUsedHeapMB(),
          load.getMaxHeapMB());
      node.setRequests(load.getNumberOfRequests());
      for (RegionLoad region : load.getRegionsLoad().values()) {
        node.addRegion(region.getName(), region.getStores(),
          region.getStorefiles(), region.getStorefileSizeMB(),
          region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB(),
          region.getReadRequestsCount(), region.getWriteRequestsCount(),
          region.getRootIndexSizeKB(), region.getTotalStaticIndexSizeKB(),
          region.getTotalStaticBloomSizeKB(), region.getTotalCompactingKVs(),
          region.getCurrentCompactedKVs());
      }
    }
    for (ServerName name : status.getDeadServerNames()) {
      model.addDeadNode(name.toString());
    }
    ResponseBuilder response = Response.ok(model);
    response.cacheControl(cacheControl);
    servlet.getMetrics().incrementSucessfulGetRequests(1);
    return response.build();
  } catch (IOException e) {
    servlet.getMetrics().incrementFailedGetRequests(1);
    return Response.status(Response.Status.SERVICE_UNAVAILABLE)
      .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
      .build();
  }
}
Example 10: get
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
@GET
@Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
  MIMETYPE_PROTOBUF_IETF})
public Response get(final @Context UriInfo uriInfo) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("GET " + uriInfo.getAbsolutePath());
  }
  servlet.getMetrics().incrementRequests(1);
  try {
    ClusterStatus status = servlet.getAdmin().getClusterStatus();
    StorageClusterStatusModel model = new StorageClusterStatusModel();
    model.setRegions(status.getRegionsCount());
    model.setRequests(status.getRequestsCount());
    model.setAverageLoad(status.getAverageLoad());
    for (ServerName info : status.getServers()) {
      HServerLoad load = status.getLoad(info);
      StorageClusterStatusModel.Node node =
        model.addLiveNode(
          info.getHostname() + ":" +
          Integer.toString(info.getPort()),
          info.getStartcode(), load.getUsedHeapMB(),
          load.getMaxHeapMB());
      node.setRequests(load.getNumberOfRequests());
      for (HServerLoad.RegionLoad region : load.getRegionsLoad().values()) {
        node.addRegion(region.getName(), region.getStores(),
          region.getStorefiles(), region.getStorefileSizeMB(),
          region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB(),
          region.getReadRequestsCount(), region.getWriteRequestsCount(),
          region.getRootIndexSizeKB(), region.getTotalStaticIndexSizeKB(),
          region.getTotalStaticBloomSizeKB(), region.getTotalCompactingKVs(),
          region.getCurrentCompactedKVs());
      }
    }
    for (ServerName name : status.getDeadServerNames()) {
      model.addDeadNode(name.toString());
    }
    ResponseBuilder response = Response.ok(model);
    response.cacheControl(cacheControl);
    servlet.getMetrics().incrementSucessfulGetRequests(1);
    return response.build();
  } catch (IOException e) {
    servlet.getMetrics().incrementFailedGetRequests(1);
    return Response.status(Response.Status.SERVICE_UNAVAILABLE)
      .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
      .build();
  }
}
Example 11: RegionSizeCalculator
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/** ctor for unit testing */
RegionSizeCalculator(HTable table, HBaseAdmin admin) throws IOException {
  try {
    if (!enabled(table.getConfiguration())) {
      LOG.info("Region size calculation disabled.");
      return;
    }
    LOG.info("Calculating region sizes for table \"" + new String(table.getTableName()) + "\".");
    // get regions for table
    Set<HRegionInfo> tableRegionInfos = table.getRegionLocations().keySet();
    Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    for (HRegionInfo regionInfo : tableRegionInfos) {
      tableRegions.add(regionInfo.getRegionName());
    }
    ClusterStatus clusterStatus = admin.getClusterStatus();
    Collection<ServerName> servers = clusterStatus.getServers();
    final long megaByte = 1024L * 1024L;
    // iterate all cluster regions, filter regions from our table and compute their size
    for (ServerName serverName : servers) {
      ServerLoad serverLoad = clusterStatus.getLoad(serverName);
      for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
        byte[] regionId = regionLoad.getName();
        if (tableRegions.contains(regionId)) {
          long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
          sizeMap.put(regionId, regionSizeBytes);
          if (LOG.isDebugEnabled()) {
            LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
          }
        }
      }
    }
    LOG.debug("Region sizes calculated");
  } finally {
    admin.close();
  }
}
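As a hedged aside, a calculator like this is typically consumed by querying the cached sizes per region. The sketch below assumes the getRegionSize(byte[]) accessor of HBase's mapreduce RegionSizeCalculator; the conf variable and table name are placeholders, not part of the example above.

// Hypothetical usage sketch; `conf` and "example_table" are placeholders,
// and getRegionSize(byte[]) is assumed from HBase's RegionSizeCalculator.
HTable table = new HTable(conf, "example_table");
// The ctor above closes the admin in its finally block, so passing a fresh one is fine.
RegionSizeCalculator calculator = new RegionSizeCalculator(table, new HBaseAdmin(conf));
for (HRegionInfo region : table.getRegionLocations().keySet()) {
  long sizeBytes = calculator.getRegionSize(region.getRegionName());
  LOG.info("Region " + region.getRegionNameAsString() + " ~ " + sizeBytes + " bytes");
}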
Example 12: HBaseRegionSizeCalculator
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/**
 * Computes size of each region for table and given column families.
 */
public HBaseRegionSizeCalculator(String tableName, Connection hbaseConnection) throws IOException {
  Table table = null;
  Admin admin = null;
  try {
    table = hbaseConnection.getTable(TableName.valueOf(tableName));
    admin = hbaseConnection.getAdmin();
    if (!enabled(table.getConfiguration())) {
      logger.info("Region size calculation disabled.");
      return;
    }
    logger.info("Calculating region sizes for table \"" + table.getName() + "\".");
    // Get regions for table.
    RegionLocator regionLocator = hbaseConnection.getRegionLocator(table.getName());
    List<HRegionLocation> regionLocationList = regionLocator.getAllRegionLocations();
    Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    for (HRegionLocation hRegionLocation : regionLocationList) {
      tableRegions.add(hRegionLocation.getRegionInfo().getRegionName());
    }
    ClusterStatus clusterStatus = admin.getClusterStatus();
    Collection<ServerName> servers = clusterStatus.getServers();
    final long megaByte = 1024L * 1024L;
    // Iterate all cluster regions, filter regions from our table and
    // compute their size.
    for (ServerName serverName : servers) {
      ServerLoad serverLoad = clusterStatus.getLoad(serverName);
      for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
        byte[] regionId = regionLoad.getName();
        if (tableRegions.contains(regionId)) {
          long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
          sizeMap.put(regionId, regionSizeBytes);
          countMap.put(regionId, new Pair<>(regionLoad.getStores(), regionLoad.getStorefiles()));
          if (regionSizeBytes == 0L) {
            logger.info("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
          }
        }
      }
    }
  } finally {
    IOUtils.closeQuietly(admin);
  }
}
Example 13: HBaseRegionSizeCalculator
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/** Constructor for unit testing */
HBaseRegionSizeCalculator(HTable table, HBaseAdmin hBaseAdmin) throws IOException {
  try {
    if (!enabled(table.getConfiguration())) {
      logger.info("Region size calculation disabled.");
      return;
    }
    logger.info("Calculating region sizes for table \"" + new String(table.getTableName()) + "\".");
    // Get regions for table.
    Set<HRegionInfo> tableRegionInfos = table.getRegionLocations().keySet();
    Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    for (HRegionInfo regionInfo : tableRegionInfos) {
      tableRegions.add(regionInfo.getRegionName());
    }
    ClusterStatus clusterStatus = hBaseAdmin.getClusterStatus();
    Collection<ServerName> servers = clusterStatus.getServers();
    final long megaByte = 1024L * 1024L;
    // Iterate all cluster regions, filter regions from our table and
    // compute their size.
    for (ServerName serverName : servers) {
      ServerLoad serverLoad = clusterStatus.getLoad(serverName);
      for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
        byte[] regionId = regionLoad.getName();
        if (tableRegions.contains(regionId)) {
          long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
          sizeMap.put(regionId, regionSizeBytes);
          // logger.info("Region " + regionLoad.getNameAsString()
          //     + " has size " + regionSizeBytes);
        }
      }
    }
  } finally {
    hBaseAdmin.close();
  }
}