本文整理匯總了Java中org.apache.hadoop.hbase.client.HBaseAdmin.getClusterStatus方法的典型用法代碼示例。如果您正苦於以下問題:Java HBaseAdmin.getClusterStatus方法的具體用法?Java HBaseAdmin.getClusterStatus怎麽用?Java HBaseAdmin.getClusterStatus使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.hbase.client.HBaseAdmin
的用法示例。
在下文中一共展示了HBaseAdmin.getClusterStatus方法的10個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: getDeployedHRIs
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
* Get region info from local cluster.
*/
/**
 * Collects, from the local cluster's status, the names of all regions
 * currently deployed on each live region server.
 *
 * @param admin admin handle used to read the current cluster status
 * @return map from each region server to the region names it is serving
 * @throws IOException if the cluster status or a server's online-region
 *         list cannot be fetched
 */
Map<ServerName, List<String>> getDeployedHRIs(final HBaseAdmin admin) throws IOException {
  ClusterStatus clusterStatus = admin.getClusterStatus();
  Map<ServerName, List<String>> deployed = new HashMap<ServerName, List<String>>();
  for (ServerName serverName : clusterStatus.getServers()) {
    // Ask each region server directly which regions it is hosting right now.
    AdminProtos.AdminService.BlockingInterface rs =
        ((HConnection) connection).getAdmin(serverName);
    List<String> names = new ArrayList<String>();
    for (HRegionInfo info : ProtobufUtil.getOnlineRegions(rs)) {
      names.add(info.getRegionNameAsString());
    }
    deployed.put(serverName, names);
  }
  return deployed;
}
示例2: getDeployedHRIs
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
* Get region info from local cluster.
*/
/**
 * Builds a snapshot of region deployment on the local cluster: each live
 * region server mapped to the names of the regions it currently serves.
 *
 * @param admin admin handle; its connection is reused to reach each server
 * @return map from region server to the list of its online region names
 * @throws IOException if cluster status or a server's regions cannot be read
 */
Map<ServerName, List<String>> getDeployedHRIs(
    final HBaseAdmin admin) throws IOException {
  ClusterStatus clusterStatus = admin.getClusterStatus();
  Map<ServerName, List<String>> deployed =
      new HashMap<ServerName, List<String>>();
  // Reuse the admin's own connection rather than opening a new one.
  HConnection conn = admin.getConnection();
  for (ServerName serverName : clusterStatus.getServers()) {
    AdminProtos.AdminService.BlockingInterface rs = conn.getAdmin(serverName);
    // Query the server for every region it is serving at this moment.
    List<String> names = new ArrayList<String>();
    for (HRegionInfo info : ProtobufUtil.getOnlineRegions(rs)) {
      names.add(info.getRegionNameAsString());
    }
    deployed.put(serverName, names);
  }
  return deployed;
}
示例3: collectRegionMetrics
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Fills per-region metrics (store-file count, store count, request count,
 * hosting server) into the supplied region-info map, using the per-server
 * load reported by the cluster status. Regions present in the cluster
 * status but missing from the map are logged as errors; system-table
 * regions are skipped.
 *
 * @param aRegionInfos pre-built map of regions to enrich, keyed by name
 * @param hBaseAdmin   admin handle used to read the cluster status
 * @throws IOException if the cluster status cannot be fetched
 */
protected void collectRegionMetrics(Map<RegionName, RegionInfo> aRegionInfos,
    HBaseAdmin hBaseAdmin) throws IOException {
  ClusterStatus clusterStatus = hBaseAdmin.getClusterStatus();
  for (ServerName server : clusterStatus.getServers()) {
    ServerLoad serverLoad = clusterStatus.getLoad(server);
    for (Map.Entry<byte[], RegionLoad> entry : serverLoad.getRegionsLoad().entrySet()) {
      RegionLoad regionLoad = entry.getValue();
      RegionInfo regionInfo = aRegionInfos.get(new RegionName(regionLoad.getName()));
      if (regionInfo == null) {
        // Region reported by the cluster but unknown to our map.
        LOGGER.error("cannot find regionInfo:{}", regionLoad.getNameAsString());
        continue;
      }
      if (!regionInfo.isSystemTable()) {
        regionInfo.setFileCount(regionLoad.getStorefiles());
        regionInfo.setStoreCount(regionLoad.getStores());
        regionInfo.setActivityCount(regionLoad.getRequestsCount());
        regionInfo.setServer(server);
      }
    }
  }
}
示例4: connect
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * To repair region consistency, one must call connect() in order to repair
 * online state.
 *
 * Initializes the connection, admin handle, meta-table handle, and an
 * initial cluster-status snapshot; order matters, since each later field
 * is built on the connection created first.
 *
 * @throws IOException if the connection or any handle cannot be created
 */
public void connect() throws IOException {
// Shared connection backing both the admin handle and the meta table.
connection = HConnectionManager.createConnection(getConf());
admin = new HBaseAdmin(connection);
// hbase:meta is read directly during repair.
meta = new HTable(TableName.META_TABLE_NAME, connection);
// One-time snapshot of cluster state taken at connect time.
status = admin.getClusterStatus();
}
示例5: RegionLoadAdapter
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Populates {@code regionLoadMap} with a {@link RegionLoadDelegator} for
 * every region of interest, matching the cluster status's per-server
 * region loads against {@code regionMap}. Regions not present in
 * {@code regionMap} are ignored. Elapsed time is reported via
 * {@code Util.printVerboseMessage} when verbose output is enabled in args.
 *
 * @param admin     admin handle used to read the cluster status
 * @param regionMap regions of interest, keyed by encoded region name bytes
 * @param args      command-line args controlling verbose timing output
 * @throws IOException if the cluster status cannot be fetched
 */
public RegionLoadAdapter(HBaseAdmin admin, Map<byte[], HRegionInfo> regionMap, Args args) throws IOException {
  long startMillis = System.currentTimeMillis();
  ClusterStatus status = admin.getClusterStatus();
  for (ServerName server : status.getServers()) {
    ServerLoad load = status.getLoad(server);
    for (Map.Entry<byte[], RegionLoad> e : load.getRegionsLoad().entrySet()) {
      // Single lookup instead of the double get; skip regions we don't track.
      HRegionInfo region = regionMap.get(e.getKey());
      if (region != null) {
        regionLoadMap.put(region, new RegionLoadDelegator(e.getValue()));
      }
    }
  }
  Util.printVerboseMessage(args, "RegionLoadAdapter", startMillis);
}
示例6: RegionLoadAdapter
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Populates {@code regionLoadMap} with a {@link RegionLoadDelegator} for
 * each tracked region, by walking every server's {@code HServerLoad} from
 * the cluster status and matching region name bytes against
 * {@code regionMap}. Untracked regions are skipped. Elapsed time is
 * reported via {@code Util.printVerboseMessage}.
 *
 * @param admin     admin handle used to read the cluster status
 * @param regionMap regions of interest, keyed by encoded region name bytes
 * @param args      command-line args controlling verbose timing output
 * @throws IOException if the cluster status cannot be fetched
 */
public RegionLoadAdapter(HBaseAdmin admin, Map<byte[], HRegionInfo> regionMap, Args args) throws IOException {
  long startMillis = System.currentTimeMillis();
  ClusterStatus status = admin.getClusterStatus();
  for (ServerName server : status.getServers()) {
    HServerLoad load = status.getLoad(server);
    for (Map.Entry<byte[], HServerLoad.RegionLoad> e : load.getRegionsLoad().entrySet()) {
      // Single lookup instead of the double get; skip regions we don't track.
      HRegionInfo region = regionMap.get(e.getKey());
      if (region != null) {
        regionLoadMap.put(region, new RegionLoadDelegator(e.getValue()));
      }
    }
  }
  Util.printVerboseMessage(args, "RegionLoadAdapter", startMillis);
}
示例7: doAction
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Probes cluster liveness by fetching the cluster status through a fresh
 * admin handle.
 *
 * Fix: the original leaked the {@link HBaseAdmin} (it owns ZooKeeper/RPC
 * resources and is never closed); the handle is now released in a
 * {@code finally} block even when {@code getClusterStatus()} throws.
 *
 * @return {@code true} if a non-null cluster status was obtained
 * @throws Exception if the admin cannot be created or the status fetched
 */
@Override
protected boolean doAction() throws Exception {
  HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
  try {
    ClusterStatus status = admin.getClusterStatus();
    return status != null;
  } finally {
    admin.close();
  }
}
示例8: RegionSizeCalculator
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/** ctor for unit testing */
RegionSizeCalculator (HTable table, HBaseAdmin admin) throws IOException {
try {
if (!enabled(table.getConfiguration())) {
LOG.info("Region size calculation disabled.");
return;
}
LOG.info("Calculating region sizes for table \"" + new String(table.getTableName()) + "\".");
//get regions for table
Set<HRegionInfo> tableRegionInfos = table.getRegionLocations().keySet();
Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
for (HRegionInfo regionInfo : tableRegionInfos) {
tableRegions.add(regionInfo.getRegionName());
}
ClusterStatus clusterStatus = admin.getClusterStatus();
Collection<ServerName> servers = clusterStatus.getServers();
final long megaByte = 1024L * 1024L;
//iterate all cluster regions, filter regions from our table and compute their size
for (ServerName serverName: servers) {
ServerLoad serverLoad = clusterStatus.getLoad(serverName);
for (RegionLoad regionLoad: serverLoad.getRegionsLoad().values()) {
byte[] regionId = regionLoad.getName();
if (tableRegions.contains(regionId)) {
long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
sizeMap.put(regionId, regionSizeBytes);
if (LOG.isDebugEnabled()) {
LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
}
}
}
}
LOG.debug("Region sizes calculated");
} finally {
admin.close();
}
}
示例9: collectCompactInfo
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Collects, for every live region server, the set of regions currently
 * undergoing compaction, recording them in {@code compactingRegions} and
 * folding them into the compacted-set bookkeeping, then prints the
 * filtered regions.
 *
 * Fix: the original created an {@link HBaseAdmin} and never closed it,
 * leaking its ZooKeeper/RPC resources; the handle is now released in a
 * {@code finally} block (after the executor, which wraps it, is closed).
 *
 * @param conf cluster configuration used to create the admin handle
 * @throws IOException          if cluster metadata cannot be read
 * @throws InterruptedException if a batch call is interrupted
 */
public void collectCompactInfo(Configuration conf) throws IOException, InterruptedException {
  HBaseAdmin hBaseAdmin = new HBaseAdmin(conf);
  try {
    HConnection connection = hBaseAdmin.getConnection();
    // Seed the region map from table metadata so loads can be matched by name.
    Map<RegionName, RegionInfo> regionInfoMap =
        constructInitialRegionInfos(hBaseAdmin, connection.listTables());
    ClusterStatus clusterStatus = hBaseAdmin.getClusterStatus();
    Collection<ServerName> servers = clusterStatus.getServers();
    HbaseBatchExecutor executor = null;
    try {
      executor = new HbaseBatchExecutor(hBaseAdmin);
      for (ServerName server : servers) {
        if (server != null) {
          // Gather the known regions hosted on this server.
          List<RegionInfo> regionsOnAServer = new LinkedList<>();
          ServerLoad load = clusterStatus.getLoad(server);
          Map<byte[], RegionLoad> regionsLoad = load.getRegionsLoad();
          for (RegionLoad regionLoad : regionsLoad.values()) {
            RegionName regionName = new RegionName(regionLoad.getName());
            RegionInfo regionInfo = regionInfoMap.get(regionName);
            if (regionInfo != null) {
              regionsOnAServer.add(regionInfo);
            }
          }
          // Ask the server which of those regions are compacting right now.
          List<RegionInfo> compactingRegionsOnAServer =
              executor.getCompactingRegions(server, regionsOnAServer);
          compactingRegions.put(server, compactingRegionsOnAServer);
          addToCompactedSet(server, compactingRegionsOnAServer);
        }
      }
    } finally {
      if (executor != null) {
        executor.close();
      }
    }
  } finally {
    hBaseAdmin.close();
  }
  printoutFilteredRegions(compactingRegions);
}
示例10: findNonActiveRegionsAndCompact
import org.apache.hadoop.hbase.client.HBaseAdmin; //導入方法依賴的package包/類
/**
 * Walks every live region server and major-compacts its candidate regions
 * that look idle, while keeping the number of concurrently compacting
 * regions per server under {@code maxCompactingRegionPerServer}.
 *
 * A region counts as idle when its request count from the cluster status
 * equals the activity count recorded earlier in its {@code RegionInfo}
 * (i.e. no new requests since the last observation); busy regions are
 * only logged. Regions with no load entry are logged with a warning.
 *
 * @param aHBaseAdmin      admin handle used for cluster status and batch ops
 * @param aFilteredRegions candidate regions per server, built by the caller
 * @throws IOException          if cluster metadata cannot be read
 * @throws InterruptedException if a batch call is interrupted
 */
private void findNonActiveRegionsAndCompact(HBaseAdmin aHBaseAdmin,
Map<ServerName, List<RegionInfo>> aFilteredRegions) throws IOException, InterruptedException {
ClusterStatus clusterStatus = aHBaseAdmin.getClusterStatus();
HbaseBatchExecutor executor = null;
try {
executor = new HbaseBatchExecutor(aHBaseAdmin);
for (ServerName server : clusterStatus.getServers()) {
// Current number of in-flight compactions on this server; skip the
// server entirely if it is already at the limit.
int compactingCount = checkCompactingRegions(server, executor);
if (compactingCount < maxCompactingRegionPerServer) {
ServerLoad load = clusterStatus.getLoad(server);
Map<byte[], RegionLoad> regionsLoad = load.getRegionsLoad();
List<RegionInfo> regionInfos = aFilteredRegions.get(server);
// Candidate list may be null for servers with no filtered regions.
for (int i = 0; regionInfos != null && i < regionInfos.size(); i++) {
RegionInfo savedInfo = regionInfos.get(i);
RegionLoad regionLoad = regionsLoad.get(savedInfo.getRegionName().toByteBinary());
if (regionLoad == null) {
LOGGER.warn("!!regionLoad doesn't have this region:{}", savedInfo.getRegionName());
} else {
long requestsCount = regionLoad.getRequestsCount();
if (savedInfo.getActivityCount() != requestsCount) {
// Requests arrived since the last snapshot: region is busy, skip it.
LOGGER.info("Region Busy:{} {}", requestsCount - savedInfo.getActivityCount(),
savedInfo.getRegionName());
} else {
// Idle region: record it as compacting FIRST (updates the count),
// then trigger the major compaction.
compactingCount = bookKeepingCompactingRegion(server, savedInfo);
LOGGER.info("Start Compact:{} with fileCountMinusCF={}", savedInfo.getRegionName(),
savedInfo.getFileCountMinusCF());
executor.majorCompact(server, savedInfo.getRegionName());
// Stop scheduling more compactions once this server hits the cap.
if (compactingCount >= maxCompactingRegionPerServer) {
break;
}
}
}
}
}
}
} finally {
if (executor != null) {
executor.close();
}
}
}