本文整理汇总了Java中org.apache.hadoop.hbase.HServerLoad类的典型用法代码示例。如果您正苦于以下问题:Java HServerLoad类的具体用法?Java HServerLoad怎么用?Java HServerLoad使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
HServerLoad类属于org.apache.hadoop.hbase包,在下文中一共展示了HServerLoad类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: regionServerStartup
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Let the server manager know a new regionserver has come online.
* Rejects servers that are already known (dead, or online at the same
* host+port) and records accepted servers with an empty load.
* @param ia The remote address
* @param port The remote port
* @param serverStartcode The start code (start timestamp) the region server reported
* @param serverCurrentTime The current time of the region server in ms
* @return The ServerName we know this server as.
* @throws IOException presumably raised by the check* methods below on
*   excessive clock skew, a dead server, or a duplicate host+port — confirm
*/
ServerName regionServerStartup(final InetAddress ia, final int port,
final long serverStartcode, long serverCurrentTime)
throws IOException {
// Test for case where we get a region startup message from a regionserver
// that has been quickly restarted but whose znode expiration handler has
// not yet run, or from a server whose fail we are currently processing.
// Test its host+port combo is present in serverAddressToServerInfo. If it
// is, reject the server and trigger its expiration. The next time it comes
// in, it should have been removed from serverAddressToServerInfo and queued
// for processing by ProcessServerShutdown.
ServerName sn = new ServerName(ia.getHostName(), port, serverStartcode);
checkClockSkew(sn, serverCurrentTime);
checkIsDead(sn, "STARTUP");
checkAlreadySameHostPort(sn);
// Newly admitted server: record it with an empty load until its first report.
recordNewServer(sn, HServerLoad.EMPTY_HSERVERLOAD);
return sn;
}
示例2: regionServerReport
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Processes a periodic load report from a region server.
* A server not yet in the online map is admitted via {@code recordNewServer}
* (e.g. this master joined an already-running cluster); an already-online
* server only has its load entry refreshed.
*
* @param sn the reporting server's name
* @param hsl the load the server reported
* @throws YouAreDeadException if the server is already considered dead
* @throws PleaseHoldException presumably raised by checkAlreadySameHostPort
*   while the server's previous incarnation is still being processed — confirm
*/
void regionServerReport(ServerName sn, HServerLoad hsl)
throws YouAreDeadException, PleaseHoldException {
checkIsDead(sn, "REPORT");
if (!this.onlineServers.containsKey(sn)) {
// Already have this host+port combo and its just different start code?
checkAlreadySameHostPort(sn);
// Just let the server in. Presume master joining a running cluster.
// recordNewServer is what happens at the end of reportServerStartup.
// The only thing we are skipping is passing back to the regionserver
// the ServerName to use. Here we presume a master has already done
// that so we'll press on with whatever it gave us for ServerName.
recordNewServer(sn, hsl);
} else {
this.onlineServers.put(sn, hsl);
}
}
示例3: tryRegionServerReport
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Reports this server's current load to the master, falling back to a
* master re-lookup via ZooKeeper when the master is unreachable.
*
* @throws IOException if the master answered that this server is dead
*/
void tryRegionServerReport() throws IOException {
// NOTE(review): this guard only returns when BOTH conditions hold; if
// hbaseMaster is null while keepLooping() is true, the report call below
// will NPE. Was '||' intended here? Confirm against the run() loop.
if (!keepLooping() && hbaseMaster == null) {
// the current server is stopping
return;
}
HServerLoad hsl = buildServerLoad();
// Reset the interval counter; its value was folded into the load snapshot
// by buildServerLoad() via requestCount.get().
this.requestCount.set(0);
try {
this.hbaseMaster.regionServerReport(this.serverNameFromMasterPOV.getVersionedBytes(), hsl);
} catch (IOException ioe) {
// Unwrap server-side exceptions delivered over RPC.
if (ioe instanceof RemoteException) {
ioe = ((RemoteException) ioe).unwrapRemoteException();
}
if (ioe instanceof YouAreDeadException) {
// This will be caught and handled as a fatal error in run()
throw ioe;
}
// Couldn't connect to the master, get location from zk and reconnect
// Method blocks until new master is found or we are stopped
getMaster();
}
}
示例4: tryRegionServerReport
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Sends this region server's current load to the master. If the master is
* unreachable, re-resolves the master location from ZooKeeper and reconnects.
*
* @throws IOException if the master reports this server as dead
*/
void tryRegionServerReport()
throws IOException {
if (!keepLooping() && hbaseMaster == null) {
// Server is stopping; nothing to report.
return;
}
HServerLoad load = buildServerLoad();
// The running request count is already captured in the load snapshot
// (buildServerLoad reads requestCount.get()), so start a fresh interval.
this.requestCount.set(0);
try {
this.hbaseMaster.regionServerReport(this.serverNameFromMasterPOV.getVersionedBytes(), load);
} catch (IOException e) {
IOException cause = e;
if (cause instanceof RemoteException) {
// Unwrap the server-side exception carried over RPC.
cause = ((RemoteException) cause).unwrapRemoteException();
}
if (cause instanceof YouAreDeadException) {
// Fatal: run() catches this and shuts the server down.
throw cause;
}
// Master unreachable; look up the (possibly new) master in ZK and
// reconnect. Blocks until a master is found or this server is stopped.
getMaster();
}
}
示例5: tryRegionServerReport
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Reports the current server load to the master, re-resolving the master
* from ZooKeeper when it cannot be reached.
*
* @throws IOException if the master says this server is dead
*/
void tryRegionServerReport()
throws IOException {
HServerLoad load = buildServerLoad();
// Counter value is already captured inside the load snapshot; reset it
// so the next report covers a fresh interval.
this.requestCount.set(0);
try {
this.hbaseMaster.regionServerReport(this.serverNameFromMasterPOV.getVersionedBytes(), load);
} catch (IOException e) {
IOException unwrapped = (e instanceof RemoteException)
? ((RemoteException) e).unwrapRemoteException()
: e;
if (unwrapped instanceof YouAreDeadException) {
// Propagate: run() treats this as a fatal error.
throw unwrapped;
}
// Lost the master; block here until a new one is located via ZK or we
// are stopped.
getMaster();
}
}
示例6: hrlToARL
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Converts an {@link HServerLoad.RegionLoad} into its Avro
* {@code ARegionLoad} equivalent, copying every field one-to-one.
*
* @param rl the region load to convert
* @return the populated Avro record
* @throws IOException declared for interface symmetry with sibling converters
*/
static public ARegionLoad hrlToARL(HServerLoad.RegionLoad rl) throws IOException {
final ARegionLoad out = new ARegionLoad();
out.name = ByteBuffer.wrap(rl.getName());
out.stores = rl.getStores();
out.storefiles = rl.getStorefiles();
out.storefileSizeMB = rl.getStorefileSizeMB();
out.storefileIndexSizeMB = rl.getStorefileIndexSizeMB();
out.memStoreSizeMB = rl.getMemStoreSizeMB();
return out;
}
示例7: hslToASL
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Converts an {@link HServerLoad} into its Avro {@code AServerLoad}
* equivalent, including the per-region loads.
*
* @param hsl the server load to convert
* @return the populated Avro record
* @throws IOException propagated from {@link #hrlToARL}
*/
static public AServerLoad hslToASL(HServerLoad hsl) throws IOException {
AServerLoad asl = new AServerLoad();
// Scalar fields copy straight across.
asl.load = hsl.getLoad();
asl.maxHeapMB = hsl.getMaxHeapMB();
asl.usedHeapMB = hsl.getUsedHeapMB();
asl.memStoreSizeInMB = hsl.getMemStoreSizeInMB();
asl.numberOfRegions = hsl.getNumberOfRegions();
asl.numberOfRequests = hsl.getNumberOfRequests();
asl.storefiles = hsl.getStorefiles();
asl.storefileSizeInMB = hsl.getStorefileSizeInMB();
asl.storefileIndexSizeInMB = hsl.getStorefileIndexSizeInMB();
// Per-region loads become an Avro array (empty when none are present).
Schema arraySchema = Schema.createArray(ARegionLoad.SCHEMA$);
Collection<HServerLoad.RegionLoad> loads = hsl.getRegionsLoad().values();
int capacity = (loads == null) ? 0 : loads.size();
GenericData.Array<ARegionLoad> converted =
new GenericData.Array<ARegionLoad>(capacity, arraySchema);
if (loads != null) {
for (HServerLoad.RegionLoad rl : loads) {
converted.add(hrlToARL(rl));
}
}
asl.regionsLoad = converted;
return asl;
}
示例8: hsiToASI
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Builds an Avro {@code AServerInfo} for the given server name and load.
*
* @param sn the server's name (hostname, port, start code)
* @param hsl the server's current load
* @return the populated Avro record
* @throws IOException propagated from {@link #hslToASL}
*/
static public AServerInfo hsiToASI(ServerName sn, HServerLoad hsl) throws IOException {
AServerInfo info = new AServerInfo();
// The info (web UI) port is not known in this context.
info.infoPort = -1;
info.serverName = new Utf8(sn.toString());
info.startCode = sn.getStartcode();
info.serverAddress = hsaToASA(new HServerAddress(sn.getHostname(), sn.getPort()));
info.load = hslToASL(hsl);
return info;
}
示例9: dumpServers
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Writes one {@code serverName: load} line per online region server.
*
* @param master the master whose server manager is queried
* @param out destination for the dump
*/
private void dumpServers(HMaster master, PrintWriter out) {
final Map<ServerName, HServerLoad> online =
master.getServerManager().getOnlineServers();
for (Map.Entry<ServerName, HServerLoad> entry : online.entrySet()) {
out.println(entry.getKey() + ": " + entry.getValue());
}
}
示例10: getLoad
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* @param address the server address to look up
* @return HServerLoad if serverName is known else null
* @deprecated Use {@link #getLoad(ServerName)} instead; this overload only
*   matches on hostname and port, ignoring the start code.
*/
public HServerLoad getLoad(final HServerAddress address) {
ServerName sn = new ServerName(address.toString(), ServerName.NON_STARTCODE);
ServerName actual =
ServerName.findServerWithSameHostnamePort(this.getOnlineServersList(), sn);
return actual == null? null: getLoad(actual);
}
示例11: getAverageLoad
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Compute the average load across all region servers.
* Currently, this uses a very naive computation - just uses the number of
* regions being served, ignoring stats about number of requests.
* @return the average load, or 0.0 when no servers are online
*/
public double getAverageLoad() {
int totalLoad = 0;
int numServers = 0;
for (HServerLoad hsl : this.onlineServers.values()) {
numServers++;
totalLoad += hsl.getNumberOfRegions();
}
// Guard the empty-cluster case: the original unconditional division
// returned NaN when no servers were online.
if (numServers == 0) {
return 0.0;
}
return (double) totalLoad / (double) numServers;
}
示例12: getOnlineServers
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* @return Read-only map of servers to serverinfo
*/
public Map<ServerName, HServerLoad> getOnlineServers() {
// Presumption is that iterating the returned Map is OK.
// NOTE(review): Collections.unmodifiableMap returns a live *view* of
// onlineServers, not a snapshot; the synchronized block only covers the
// wrap itself, not the caller's later iteration. Confirm callers tolerate
// concurrent modification of the backing map.
synchronized (this.onlineServers) {
return Collections.unmodifiableMap(this.onlineServers);
}
}
示例13: regionServerReport
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Receives a region server's load report (RPC entry point), hands it to the
* server manager, and folds the request count into master metrics.
*
* @param sn the reporting server's versioned serialized name
* @param hsl the reported load, possibly null
* @throws IOException propagated from the server manager
*/
@Override
public void regionServerReport(final byte [] sn, final HServerLoad hsl)
throws IOException {
this.serverManager.regionServerReport(ServerName.parseVersionedServerName(sn), hsl);
if (hsl == null || this.metrics == null) {
return;
}
// Up our metrics.
this.metrics.incrementRequests(hsl.getTotalNumberOfRequests());
}
示例14: getRegionServers
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* @return a map keyed by the string form of each online server's name,
*   carrying that server's most recent reported load
*/
@Override
public Map<String, HServerLoad> getRegionServers() {
Map<String, HServerLoad> byName = new HashMap<String, HServerLoad>();
Map<ServerName, HServerLoad> online =
master.getServerManager().getOnlineServers();
for (final Entry<ServerName, HServerLoad> entry : online.entrySet()) {
byName.put(entry.getKey().getServerName(), entry.getValue());
}
return byName;
}
示例15: buildServerLoad
import org.apache.hadoop.hbase.HServerLoad; //导入依赖的package包/类
/**
* Assembles an HServerLoad snapshot of this region server's current state:
* per-region loads, request counters, and JVM heap usage.
*
* @return the load snapshot to report to the master
*/
HServerLoad buildServerLoad() {
Collection<HRegion> regions = getOnlineRegionsLocalContext();
// TreeMap keyed on region name so the report has a deterministic order.
TreeMap<byte[], HServerLoad.RegionLoad> regionLoads =
new TreeMap<byte[], HServerLoad.RegionLoad>(Bytes.BYTES_COMPARATOR);
for (HRegion region : regions) {
regionLoads.put(region.getRegionName(), createRegionLoad(region));
}
MemoryUsage memory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
// Heap figures are converted from bytes to MB for the report.
return new HServerLoad(requestCount.get(), (int) metrics.getRequests(),
(int) (memory.getUsed() / 1024 / 1024), (int) (memory.getMax() / 1024 / 1024), regionLoads,
this.hlog.getCoprocessorHost().getCoprocessors());
}