This article collects typical usage examples of the Java method org.apache.hadoop.hbase.ClusterStatus.getServers. If you are unsure what ClusterStatus.getServers does or how to call it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.ClusterStatus.
Below, 15 code examples of ClusterStatus.getServers are shown, ordered by popularity by default.
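All of the examples share the same basic pattern: obtain a ClusterStatus from an Admin and iterate over the live servers returned by getServers. As a warm-up, here is a minimal, self-contained sketch of that pattern; the class name ListRegionServers is ours, and it assumes an HBase 1.x client with an hbase-site.xml on the classpath.

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListRegionServers {
  public static void main(String[] args) throws IOException {
    // connect using the cluster configuration found on the classpath
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus();
      Collection<ServerName> servers = status.getServers();
      for (ServerName server : servers) {
        // getLoad(server) exposes per-server metrics such as the region count
        System.out.println(server.getServerName() + " hosts "
            + status.getLoad(server).getNumberOfRegions() + " regions");
      }
    }
  }
}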
Example 1: getDeployedHRIs
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/**
 * Get region info from the local cluster.
 */
Map<ServerName, List<String>> getDeployedHRIs(final HBaseAdmin admin) throws IOException {
  ClusterStatus status = admin.getClusterStatus();
  Collection<ServerName> regionServers = status.getServers();
  Map<ServerName, List<String>> mm = new HashMap<ServerName, List<String>>();
  for (ServerName hsi : regionServers) {
    // 'connection' is a field of the enclosing class
    AdminProtos.AdminService.BlockingInterface server = ((HConnection) connection).getAdmin(hsi);
    // list all online regions from this region server
    List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
    List<String> regionNames = new ArrayList<String>();
    for (HRegionInfo hri : regions) {
      regionNames.add(hri.getRegionNameAsString());
    }
    mm.put(hsi, regionNames);
  }
  return mm;
}
Example 2: getCurrentServers
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/** Returns the current region servers, excluding the active master. */
protected ServerName[] getCurrentServers() throws IOException {
  ClusterStatus clusterStatus = cluster.getClusterStatus();
  Collection<ServerName> regionServers = clusterStatus.getServers();
  int count = regionServers == null ? 0 : regionServers.size();
  if (count <= 0) {
    return new ServerName[]{};
  }
  ServerName master = clusterStatus.getMaster();
  if (master == null || !regionServers.contains(master)) {
    return regionServers.toArray(new ServerName[count]);
  }
  if (count == 1) {
    return new ServerName[]{};
  }
  ArrayList<ServerName> tmp = new ArrayList<ServerName>(count);
  tmp.addAll(regionServers);
  tmp.remove(master);
  return tmp.toArray(new ServerName[count - 1]);
}
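Note the edge cases this helper covers: if the active master is not registered as a region server, the full server set is returned unchanged; if the master is the only live server, an empty array is returned instead, so callers never operate on the master by accident.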
Example 3: getDeployedHRIs
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/**
 * Get region info from the local cluster.
 */
Map<ServerName, List<String>> getDeployedHRIs(HBaseAdmin admin)
    throws IOException {
  ClusterStatus status = admin.getMaster().getClusterStatus();
  Collection<ServerName> regionServers = status.getServers();
  Map<ServerName, List<String>> mm = new HashMap<ServerName, List<String>>();
  HConnection connection = admin.getConnection();
  for (ServerName hsi : regionServers) {
    // HRegionInterface is the older (pre-0.96) region server RPC interface
    HRegionInterface server =
        connection.getHRegionConnection(hsi.getHostname(), hsi.getPort());
    // list all online regions from this region server
    List<HRegionInfo> regions = server.getOnlineRegions();
    List<String> regionNames = new ArrayList<String>();
    for (HRegionInfo hri : regions) {
      regionNames.add(hri.getRegionNameAsString());
    }
    mm.put(hsi, regionNames);
  }
  return mm;
}
Example 4: getDeployedHRIs
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/**
 * Get region info from the local cluster.
 */
Map<ServerName, List<String>> getDeployedHRIs(
    final HBaseAdmin admin) throws IOException {
  ClusterStatus status = admin.getClusterStatus();
  Collection<ServerName> regionServers = status.getServers();
  Map<ServerName, List<String>> mm = new HashMap<ServerName, List<String>>();
  HConnection connection = admin.getConnection();
  for (ServerName hsi : regionServers) {
    AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi);
    // list all online regions from this region server
    List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
    List<String> regionNames = new ArrayList<String>();
    for (HRegionInfo hri : regions) {
      regionNames.add(hri.getRegionNameAsString());
    }
    mm.put(hsi, regionNames);
  }
  return mm;
}
Example 5: perform
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  ClusterStatus status = this.cluster.getClusterStatus();
  List<ServerName> victimServers = new LinkedList<ServerName>(status.getServers());
  int liveCount = (int) Math.ceil(FRC_SERVERS_THAT_HOARD_AND_LIVE * victimServers.size());
  int deadCount = (int) Math.ceil(FRC_SERVERS_THAT_HOARD_AND_DIE * victimServers.size());
  Assert.assertTrue((liveCount + deadCount) < victimServers.size());
  List<ServerName> targetServers = new ArrayList<ServerName>(liveCount);
  // pick liveCount + deadCount random victims
  for (int i = 0; i < liveCount + deadCount; ++i) {
    int victimIx = RandomUtils.nextInt(victimServers.size());
    targetServers.add(victimServers.remove(victimIx));
  }
  unbalanceRegions(status, victimServers, targetServers, HOARD_FRC_OF_REGIONS);
  Thread.sleep(WAIT_FOR_UNBALANCE_MS);
  for (int i = 0; i < liveCount; ++i) {
    killRs(targetServers.get(i));
  }
  Thread.sleep(WAIT_FOR_KILLS_MS);
  forceBalancer();
  Thread.sleep(WAIT_AFTER_BALANCE_MS);
  for (int i = 0; i < liveCount; ++i) {
    startRs(targetServers.get(i));
  }
}
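One detail worth noting: liveCount and deadCount are both rounded up with Math.ceil, so the assertion that their sum stays below victimServers.size() is what guarantees at least one server is never targeted.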
Example 6: getDeployedHRIs
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/**
 * Get region info from the local cluster.
 */
Map<ServerName, List<String>> getDeployedHRIs(
    final HBaseAdmin admin) throws IOException {
  ClusterStatus status = admin.getClusterStatus();
  Collection<ServerName> regionServers = status.getServers();
  Map<ServerName, List<String>> mm = new HashMap<ServerName, List<String>>();
  HConnection connection = admin.getConnection();
  for (ServerName hsi : regionServers) {
    AdminProtocol server =
        connection.getAdmin(hsi.getHostname(), hsi.getPort());
    // list all online regions from this region server
    List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
    List<String> regionNames = new ArrayList<String>();
    for (HRegionInfo hri : regions) {
      regionNames.add(hri.getRegionNameAsString());
    }
    mm.put(hsi, regionNames);
  }
  return mm;
}
Example 7: init
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
private void init(RegionLocator regionLocator, Admin admin)
    throws IOException {
  if (!enabled(admin.getConfiguration())) {
    LOG.info("Region size calculation disabled.");
    return;
  }
  LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");
  // get the regions for the table
  List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
  Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  for (HRegionLocation regionInfo : tableRegionInfos) {
    tableRegions.add(regionInfo.getRegionInfo().getRegionName());
  }
  ClusterStatus clusterStatus = admin.getClusterStatus();
  Collection<ServerName> servers = clusterStatus.getServers();
  final long megaByte = 1024L * 1024L;
  // iterate over all regions in the cluster, keep those of our table, and compute their sizes
  for (ServerName serverName : servers) {
    ServerLoad serverLoad = clusterStatus.getLoad(serverName);
    for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
      byte[] regionId = regionLoad.getName();
      if (tableRegions.contains(regionId)) {
        long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
        sizeMap.put(regionId, regionSizeBytes);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
        }
      }
    }
  }
  LOG.debug("Region sizes calculated");
}
Example 8: getRegionServerCount
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/**
 * Alternative to the old getCurrentNrHRS, which is no longer available.
 * @param connection connection to the cluster
 * @return Rough count of region servers out on the cluster.
 * @throws IOException
 */
private static int getRegionServerCount(final Connection connection) throws IOException {
  try (Admin admin = connection.getAdmin()) {
    ClusterStatus status = admin.getClusterStatus();
    Collection<ServerName> servers = status.getServers();
    return servers == null || servers.isEmpty() ? 0 : servers.size();
  }
}
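Because the Admin is opened in a try-with-resources block, it is closed even if getClusterStatus() throws, which makes this helper safe to call repeatedly, for example inside a retry loop.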
Example 9: perform
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  ClusterStatus status = this.cluster.getClusterStatus();
  List<ServerName> victimServers = new LinkedList<ServerName>(status.getServers());
  Set<ServerName> killedServers = new HashSet<ServerName>();
  int liveCount = (int) Math.ceil(FRC_SERVERS_THAT_HOARD_AND_LIVE * victimServers.size());
  int deadCount = (int) Math.ceil(FRC_SERVERS_THAT_HOARD_AND_DIE * victimServers.size());
  Assert.assertTrue((liveCount + deadCount) < victimServers.size());
  List<ServerName> targetServers = new ArrayList<ServerName>(liveCount);
  for (int i = 0; i < liveCount + deadCount; ++i) {
    int victimIx = RandomUtils.nextInt(victimServers.size());
    targetServers.add(victimServers.remove(victimIx));
  }
  unbalanceRegions(status, victimServers, targetServers, HOARD_FRC_OF_REGIONS);
  Thread.sleep(waitForUnbalanceMilliSec);
  for (int i = 0; i < liveCount; ++i) {
    // Don't keep killing servers if we're trying to stop the monkey.
    if (context.isStopping()) {
      break;
    }
    killRs(targetServers.get(i));
    killedServers.add(targetServers.get(i));
  }
  Thread.sleep(waitForKillsMilliSec);
  forceBalancer();
  Thread.sleep(waitAfterBalanceMilliSec);
  for (ServerName server : killedServers) {
    startRs(server);
  }
}
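Compared with Example 5, this variant records the servers it actually killed and checks context.isStopping() before each kill, so the action can be interrupted cleanly and only genuinely killed servers are restarted afterwards.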
Example 10: perform
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
@Override
public void perform() throws Exception {
  LOG.info("Unbalancing regions");
  ClusterStatus status = this.cluster.getClusterStatus();
  List<ServerName> victimServers = new LinkedList<ServerName>(status.getServers());
  int targetServerCount = (int) Math.ceil(fractionOfServers * victimServers.size());
  List<ServerName> targetServers = new ArrayList<ServerName>(targetServerCount);
  for (int i = 0; i < targetServerCount; ++i) {
    int victimIx = RandomUtils.nextInt(victimServers.size());
    targetServers.add(victimServers.remove(victimIx));
  }
  unbalanceRegions(status, victimServers, targetServers, fractionOfRegions);
}
Example 11: csToACS
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
public static AClusterStatus csToACS(ClusterStatus cs) throws IOException {
  AClusterStatus acs = new AClusterStatus();
  acs.averageLoad = cs.getAverageLoad();
  Collection<ServerName> deadServerNames = cs.getDeadServerNames();
  Schema stringArraySchema = Schema.createArray(Schema.create(Schema.Type.STRING));
  GenericData.Array<CharSequence> adeadServerNames = null;
  if (deadServerNames != null) {
    adeadServerNames = new GenericData.Array<CharSequence>(deadServerNames.size(), stringArraySchema);
    for (ServerName deadServerName : deadServerNames) {
      adeadServerNames.add(new Utf8(deadServerName.toString()));
    }
  } else {
    adeadServerNames = new GenericData.Array<CharSequence>(0, stringArraySchema);
  }
  acs.deadServerNames = adeadServerNames;
  acs.deadServers = cs.getDeadServers();
  acs.hbaseVersion = new Utf8(cs.getHBaseVersion());
  acs.regionsCount = cs.getRegionsCount();
  acs.requestsCount = cs.getRequestsCount();
  Collection<ServerName> hserverInfos = cs.getServers();
  Schema s = Schema.createArray(AServerInfo.SCHEMA$);
  GenericData.Array<AServerInfo> aserverInfos = null;
  if (hserverInfos != null) {
    aserverInfos = new GenericData.Array<AServerInfo>(hserverInfos.size(), s);
    for (ServerName hsi : hserverInfos) {
      aserverInfos.add(hsiToASI(hsi, cs.getLoad(hsi)));
    }
  } else {
    aserverInfos = new GenericData.Array<AServerInfo>(0, s);
  }
  acs.serverInfos = aserverInfos;
  // reuse the null check above rather than calling cs.getServers() again unguarded
  acs.servers = hserverInfos == null ? 0 : hserverInfos.size();
  return acs;
}
Example 12: testReplicationStatus
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/**
 * Test for HBASE-9531: put a few rows into htable1 (which should be replicated
 * to htable2), create a ClusterStatus instance 'status' from HBaseAdmin, and test
 * status.getLoad(server).getReplicationLoadSourceList() and
 * status.getLoad(server).getReplicationLoadSink().
 * @throws Exception
 */
@Test(timeout = 300000)
public void testReplicationStatus() throws Exception {
  LOG.info("testReplicationStatus");
  try (Admin admin = utility1.getConnection().getAdmin()) {
    final byte[] qualName = Bytes.toBytes("q");
    Put p;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
      p = new Put(Bytes.toBytes("row" + i));
      p.add(famName, qualName, Bytes.toBytes("val" + i));
      htable1.put(p);
    }
    ClusterStatus status = admin.getClusterStatus();
    for (ServerName server : status.getServers()) {
      ServerLoad sl = status.getLoad(server);
      List<ReplicationLoadSource> rLoadSourceList = sl.getReplicationLoadSourceList();
      ReplicationLoadSink rLoadSink = sl.getReplicationLoadSink();
      // check that the source list has at least one entry
      assertTrue("failed to get ReplicationLoadSourceList", (rLoadSourceList.size() > 0));
      // check only that the sink exists, as it is difficult to verify the values on the fly
      assertTrue("failed to get ReplicationLoadSink.AgeOfLastShippedOp ",
          (rLoadSink.getAgeOfLastAppliedOp() >= 0));
      assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp ",
          (rLoadSink.getTimeStampsOfLastAppliedOp() >= 0));
    }
  }
}
Example 13: RegionLoadAdapter
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
public RegionLoadAdapter(HBaseAdmin admin, Map<byte[], HRegionInfo> regionMap, Args args) throws IOException {
  long timestamp = System.currentTimeMillis();
  ClusterStatus clusterStatus = admin.getClusterStatus();
  Collection<ServerName> serverNames = clusterStatus.getServers();
  for (ServerName serverName : serverNames) {
    HServerLoad serverLoad = clusterStatus.getLoad(serverName);
    for (Map.Entry<byte[], HServerLoad.RegionLoad> entry : serverLoad.getRegionsLoad().entrySet()) {
      // keep only the regions the caller asked about
      if (regionMap.get(entry.getKey()) != null) {
        regionLoadMap.put(regionMap.get(entry.getKey()), new RegionLoadDelegator(entry.getValue()));
      }
    }
  }
  Util.printVerboseMessage(args, "RegionLoadAdapter", timestamp);
}
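The adapter effectively joins the per-region load reported by each server against the caller-supplied regionMap, so regionLoadMap ends up holding load information only for the regions the caller asked about.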
Example 14: clusterDetails
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
/**
 * Print cluster details.
 * @throws Exception
 */
@Test
public void clusterDetails() throws Exception {
  ClusterStatus status = hbaseAdmin.getClusterStatus();
  // status.getServerInfo(); // deprecated
  for (ServerName server : status.getServers()) {
    System.out.print("serverName: " + server.getServerName() + ", ");
    System.out.print("hostname: " + server.getHostname() + ", ");
    System.out.print("port: " + server.getPort() + ", ");
    System.out.println("hostAndPort: " + server.getHostAndPort());
  }
  assertNotNull(status);
}
Example 15: init
import org.apache.hadoop.hbase.ClusterStatus; // import the package/class the method depends on
private void init(RegionLocator regionLocator, Admin admin)
    throws IOException {
  if (!enabled(admin.getConfiguration())) {
    LOG.info("Region size calculation disabled.");
    return;
  }
  LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");
  // get the regions for the table
  List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
  Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  for (HRegionLocation regionInfo : tableRegionInfos) {
    tableRegions.add(regionInfo.getRegionInfo().getRegionName());
  }
  ClusterStatus clusterStatus = admin.getClusterStatus();
  Collection<ServerName> servers = clusterStatus.getServers();
  final long megaByte = 1024L * 1024L;
  // iterate over all regions in the cluster, keep those of our table, and compute their sizes
  for (ServerName serverName : servers) {
    ServerLoad serverLoad = clusterStatus.getLoad(serverName);
    for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
      byte[] regionId = regionLoad.getName();
      if (tableRegions.contains(regionId)) {
        long regionSizeBytes = (regionLoad.getStorefileSizeMB() + regionLoad.getMemStoreSizeMB()) * megaByte;
        sizeMap.put(regionId, regionSizeBytes);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
        }
      }
    }
  }
  LOG.debug("Region sizes calculated");
}
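Unlike Example 7, this variant adds getMemStoreSizeMB() to the store file size, so the computed region size also accounts for data still buffered in the memstore and not yet flushed.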