This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.Addressing. If you are wondering what the Addressing class does, how to use it, or where to find real usage examples, the curated code samples below may help.
The Addressing class belongs to the org.apache.hadoop.hbase.util package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help surface better Java code samples.
Example 1: stopRegionServer
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
/**
 * Stop the designated regionserver
 * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
 *   <code>example.org:1234</code>
 * @throws IOException if a remote or network exception occurs
 */
@Override
public synchronized void stopRegionServer(final String hostnamePort)
    throws IOException {
  String hostname = Addressing.parseHostname(hostnamePort);
  int port = Addressing.parsePort(hostnamePort);
  AdminService.BlockingInterface admin =
      this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
  StopServerRequest request = RequestConverter.buildStopServerRequest(
      "Called by admin client " + this.connection.toString());
  PayloadCarryingRpcController controller = rpcControllerFactory.newController();
  controller.setPriority(HConstants.HIGH_QOS);
  try {
    // TODO: this does not do retries, it should. Set priority and timeout in controller
    admin.stopServer(controller, request);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
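The two Addressing helpers used above simply split a "hostname:port" string on the colon separator. Below is a minimal standalone sketch; the class name and the sample address are made up for illustration and are not part of the example above:

import org.apache.hadoop.hbase.util.Addressing;

public class AddressingParseDemo {
  public static void main(String[] args) {
    // Split a "hostname:port" string with the same helpers the admin client uses.
    String hostnamePort = "example.org:1234";                  // sample value
    String hostname = Addressing.parseHostname(hostnamePort);  // "example.org"
    int port = Addressing.parsePort(hostnamePort);             // 1234
    System.out.println(hostname + " / " + port);
  }
}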
Example 2: dataToServerName
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
private static ServerName dataToServerName(final byte [] data) {
  // The str returned could be old style -- pre hbase-1502 -- which was
  // hostname and port separated by a colon rather than hostname, port and
  // startcode delimited by a ','.
  if (data == null || data.length <= 0) return null;
  String str = Bytes.toString(data);
  int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR);
  if (index != -1) {
    // Presume it's the ServerName.toString() format.
    return ServerName.parseServerName(str);
  }
  // Presume it's a hostname:port format.
  String hostname = Addressing.parseHostname(str);
  int port = Addressing.parsePort(str);
  return new ServerName(hostname, port, -1L);
}
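The branch above distinguishes the legacy "hostname:port" encoding (pre HBASE-1502) from the current "hostname,port,startcode" encoding by searching for ServerName.SERVERNAME_SEPARATOR. A small sketch of that decision, reusing the sample server names from Example 7 below (the demo class itself is hypothetical):

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.Addressing;

public class ServerNameFormatDemo {
  public static void main(String[] args) {
    String legacy = "a.b.c:58102";                 // old style: hostname and port
    String current = "a.b.c,58102,1319771740322";  // new style: hostname, port, startcode

    // No separator found -> fall back to Addressing's hostname:port parsing.
    System.out.println(legacy.indexOf(ServerName.SERVERNAME_SEPARATOR));   // -1
    System.out.println(Addressing.parseHostname(legacy) + ":" + Addressing.parsePort(legacy));

    // Separator present -> let ServerName parse it.
    System.out.println(current.indexOf(ServerName.SERVERNAME_SEPARATOR));  // 5
    System.out.println(ServerName.parseServerName(current));
  }
}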
Example 3: stopRegionServer
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
/**
 * Stop the designated regionserver
 * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
 *   <code>example.org:1234</code>
 * @throws IOException if a remote or network exception occurs
 */
@Override
public synchronized void stopRegionServer(final String hostnamePort)
    throws IOException {
  String hostname = Addressing.parseHostname(hostnamePort);
  int port = Addressing.parsePort(hostnamePort);
  AdminService.BlockingInterface admin =
      this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
  StopServerRequest request = RequestConverter.buildStopServerRequest(
      "Called by admin client " + this.connection.toString());
  try {
    admin.stopServer(null, request);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
Example 4: doGet
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
@Override
public void doGet(HttpServletRequest request,
    HttpServletResponse response) throws ServletException, IOException {
  String redirectHost = regionServerHostname;
  if (redirectHost == null) {
    redirectHost = request.getServerName();
    if (!Addressing.isLocalAddress(InetAddress.getByName(redirectHost))) {
      LOG.warn("Couldn't resolve '" + redirectHost + "' as an address local to this node and '" +
          MASTER_HOSTNAME_KEY + "' is not set; client will get an HTTP 400 response. If " +
          "your HBase deployment relies on client accessible names that the region server process " +
          "can't resolve locally, then you should set the previously mentioned configuration variable " +
          "to an appropriate hostname.");
      // Don't send client-provided input back to the client, so the requested host only appears in the logs.
      response.sendError(400, "Request was to a host that I can't resolve for any of the network interfaces on " +
          "this node. If this is due to an intermediary such as an HTTP load balancer or other proxy, your HBase " +
          "administrator can set '" + MASTER_HOSTNAME_KEY + "' to point to the correct hostname.");
      return;
    }
  }
  // TODO this scheme should come from looking at the scheme registered in the infoserver's http server for the
  // host and port we're using, but it's buried way too deep to do that ATM.
  String redirectUrl = request.getScheme() + "://"
      + redirectHost + ":" + regionServerInfoPort
      + request.getRequestURI();
  response.sendRedirect(redirectUrl);
}
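Addressing.isLocalAddress, used in the redirect check above, reports whether an InetAddress belongs to the current machine. A minimal sketch calling it directly; the class name is made up and the sample hostnames are chosen only for illustration (per Examples 6 and 13, loopback addresses and the "0.0.0.0" wildcard bind address count as local):

import java.net.InetAddress;

import org.apache.hadoop.hbase.util.Addressing;

public class LocalAddressCheck {
  public static void main(String[] args) throws Exception {
    // Same check the servlet performs before deciding whether it can redirect.
    for (String host : new String[] { "localhost", "0.0.0.0" }) {
      boolean local = Addressing.isLocalAddress(InetAddress.getByName(host));
      System.out.println(host + " local to this node: " + local);
    }
  }
}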
Example 5: stopRegionServer
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
@Override
public synchronized void stopRegionServer(final String hostnamePort)
    throws IOException {
  String hostname = Addressing.parseHostname(hostnamePort);
  int port = Addressing.parsePort(hostnamePort);
  final AdminService.BlockingInterface admin =
      this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
  // TODO: There is no timeout on this controller. Set one!
  HBaseRpcController controller = rpcControllerFactory.newController();
  controller.setPriority(HConstants.HIGH_QOS);
  StopServerRequest request = RequestConverter.buildStopServerRequest(
      "Called by admin client " + this.connection.toString());
  try {
    admin.stopServer(controller, request);
  } catch (Exception e) {
    throw ProtobufUtil.handleRemoteException(e);
  }
}
Example 6: putUpJettyServer
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
private int putUpJettyServer() throws IOException {
  if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
    return -1;
  }
  int infoPort = conf.getInt("hbase.master.info.port.orig",
      HConstants.DEFAULT_MASTER_INFOPORT);
  // -1 is for disabling info server, so no redirecting
  if (infoPort < 0 || infoServer == null) {
    return -1;
  }
  String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
  if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
    String msg =
        "Failed to start redirecting jetty server. Address " + addr
        + " does not belong to this host. Correct configuration parameter: "
        + "hbase.master.info.bindAddress";
    LOG.error(msg);
    throw new IOException(msg);
  }
  RedirectServlet.regionServerInfoPort = infoServer.getPort();
  if (RedirectServlet.regionServerInfoPort == infoPort) {
    return infoPort;
  }
  masterJettyServer = new org.mortbay.jetty.Server();
  Connector connector = new SelectChannelConnector();
  connector.setHost(addr);
  connector.setPort(infoPort);
  masterJettyServer.addConnector(connector);
  masterJettyServer.setStopAtShutdown(true);
  Context context = new Context(masterJettyServer, "/", Context.NO_SESSIONS);
  context.addServlet(RedirectServlet.class, "/*");
  try {
    masterJettyServer.start();
  } catch (Exception e) {
    throw new IOException("Failed to start redirecting jetty server", e);
  }
  return connector.getLocalPort();
}
Example 7: testRegexPatterns
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
@Test
public void testRegexPatterns() {
  assertTrue(Pattern.matches(Addressing.VALID_PORT_REGEX, "123"));
  assertFalse(Pattern.matches(Addressing.VALID_PORT_REGEX, ""));
  assertTrue(ServerName.SERVERNAME_PATTERN.matcher("www1.example.org,1234,567").matches());
  ServerName.parseServerName("a.b.c,58102,1319771740322");
  ServerName.parseServerName("192.168.1.199,58102,1319771740322");
  ServerName.parseServerName("a.b.c:58102");
  ServerName.parseServerName("192.168.1.199:58102");
}
Example 8: MemcachedBlockCache
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
public MemcachedBlockCache(Configuration c) throws IOException {
  LOG.info("Creating MemcachedBlockCache");
  long opTimeout = c.getLong(MEMCACHED_OPTIMEOUT_KEY, MEMCACHED_DEFAULT_TIMEOUT);
  long queueTimeout = c.getLong(MEMCACHED_TIMEOUT_KEY, opTimeout + MEMCACHED_DEFAULT_TIMEOUT);
  boolean optimize = c.getBoolean(MEMCACHED_OPTIMIZE_KEY, MEMCACHED_OPTIMIZE_DEFAULT);
  ConnectionFactoryBuilder builder = new ConnectionFactoryBuilder()
      .setOpTimeout(opTimeout)
      .setOpQueueMaxBlockTime(queueTimeout) // Cap the max time before anything times out
      .setFailureMode(FailureMode.Redistribute)
      .setShouldOptimize(optimize)
      .setDaemon(true)                  // Don't keep threads around past the end of days.
      .setUseNagleAlgorithm(false)      // Ain't nobody got time for that
      .setReadBufferSize(HConstants.DEFAULT_BLOCKSIZE * 4 * 1024); // Much larger just in case
  // Assume only the localhost is serving memcached, a la mcrouter or co-locating
  // memcached with split regionservers.
  //
  // If this config is a pool of memcached servers they will all be used according to the
  // default hashing scheme defined by the memcached client -- the Spy Memcached client in
  // this case.
  String serverListString = c.get(MEMCACHED_CONFIG_KEY, "localhost:11211");
  String[] servers = serverListString.split(",");
  List<InetSocketAddress> serverAddresses = new ArrayList<InetSocketAddress>(servers.length);
  for (String s : servers) {
    serverAddresses.add(Addressing.createInetSocketAddressFromHostAndPortStr(s));
  }
  client = new MemcachedClient(builder.build(), serverAddresses);
}
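Addressing.createInetSocketAddressFromHostAndPortStr, called in the loop above, turns a single "host:port" string into an InetSocketAddress. A standalone sketch of the same server-list parsing step; the class name and the sample server list are illustrative only:

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.util.Addressing;

public class ServerListParseDemo {
  public static void main(String[] args) {
    // Same parsing the block cache constructor applies to its configured server list.
    String serverListString = "localhost:11211,localhost:11212"; // sample values
    List<InetSocketAddress> serverAddresses = new ArrayList<InetSocketAddress>();
    for (String s : serverListString.split(",")) {
      serverAddresses.add(Addressing.createInetSocketAddressFromHostAndPortStr(s));
    }
    for (InetSocketAddress a : serverAddresses) {
      System.out.println(a);
    }
  }
}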
Example 9: getIpAddressBytes
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
/**
 * @return Some IPv4/IPv6 address available on the current machine that is up, not virtual
 *   and not a loopback address. Empty array if none can be found or an error occurred.
 */
public static byte[] getIpAddressBytes() {
  try {
    return Addressing.getIpAddress().getAddress();
  } catch (IOException ex) {
    LOG.warn("Failed to get IP address bytes", ex);
  }
  return new byte[0];
}
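The helper above delegates to Addressing.getIpAddress(), which picks an address of the current machine that is up, non-virtual and not a loopback. A minimal sketch calling it directly; the demo class is hypothetical:

import java.net.InetAddress;

import org.apache.hadoop.hbase.util.Addressing;

public class IpAddressDemo {
  public static void main(String[] args) throws Exception {
    // Returns some usable IPv4/IPv6 address of the current machine, as described above.
    InetAddress addr = Addressing.getIpAddress();
    System.out.println(addr.getHostAddress());
  }
}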
Example 10: parseFrom
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
/**
 * Get a ServerName from the passed in data bytes.
 * @param data Data with a serialized server name in it; can handle the old style
 *   servername where servername was host and port. Works too with data that
 *   begins with the pb 'PBUF' magic and that is then followed by a protobuf that
 *   has a serialized {@link ServerName} in it.
 * @return Returns null if <code>data</code> is null else converts passed data
 *   to a ServerName instance.
 * @throws DeserializationException
 */
public static ServerName parseFrom(final byte [] data) throws DeserializationException {
  if (data == null || data.length <= 0) return null;
  if (ProtobufUtil.isPBMagicPrefix(data)) {
    int prefixLen = ProtobufUtil.lengthOfPBMagic();
    try {
      ZooKeeperProtos.Master rss =
          ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen);
      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = rss.getMaster();
      return valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode());
    } catch (InvalidProtocolBufferException e) {
      // A failed parse of the znode is pretty catastrophic. Rather than loop
      // retrying hoping the bad bytes will change, and rather than change
      // the signature on this method to add an IOE which will send ripples all
      // over the code base, throw a DeserializationException. This should "never"
      // happen. Fail fast if it does.
      throw new DeserializationException(e);
    }
  }
  // The str returned could be old style -- pre hbase-1502 -- which was
  // hostname and port separated by a colon rather than hostname, port and
  // startcode delimited by a ','.
  String str = Bytes.toString(data);
  int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR);
  if (index != -1) {
    // Presume it's a ServerName serialized with versioned bytes.
    return ServerName.parseVersionedServerName(data);
  }
  // Presume it's a hostname:port format.
  String hostname = Addressing.parseHostname(str);
  int port = Addressing.parsePort(str);
  return valueOf(hostname, port, -1L);
}
Example 11: getHostnamePort
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
/**
 * @return String made of hostname and port formatted as per
 *   {@link Addressing#createHostAndPortStr(String, int)}
 */
public synchronized String getHostnamePort() {
  if (this.cachedHostnamePort == null) {
    this.cachedHostnamePort =
        Addressing.createHostAndPortStr(this.hostname, this.port);
  }
  return this.cachedHostnamePort;
}
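createHostAndPortStr is the inverse of parseHostname/parsePort: it joins a hostname and a port with a ':'. A quick round-trip sketch; the values are borrowed from the test in Example 7 and the demo class is hypothetical:

import org.apache.hadoop.hbase.util.Addressing;

public class HostAndPortRoundTrip {
  public static void main(String[] args) {
    // Build a "hostname:port" string, then split it back apart.
    String hostAndPort = Addressing.createHostAndPortStr("a.b.c", 58102); // "a.b.c:58102"
    System.out.println(hostAndPort);
    System.out.println(Addressing.parseHostname(hostAndPort)); // "a.b.c"
    System.out.println(Addressing.parsePort(hostAndPort));     // 58102
  }
}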
Example 12: stopRegionServer
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
/**
 * Stop the designated regionserver
 * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
 *   <code>example.org:1234</code>
 * @throws IOException if a remote or network exception occurs
 */
public synchronized void stopRegionServer(final String hostnamePort) throws IOException {
  String hostname = Addressing.parseHostname(hostnamePort);
  int port = Addressing.parsePort(hostnamePort);
  HRegionInterface rs = this.connection.getHRegionConnection(hostname, port);
  rs.stop("Called by admin client " + this.connection.toString());
}
Example 13: putUpJettyServer
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
private int putUpJettyServer() throws IOException {
  if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
    return -1;
  }
  int infoPort = conf.getInt("hbase.master.info.port.orig",
      HConstants.DEFAULT_MASTER_INFOPORT);
  // -1 is for disabling info server, so no redirecting
  if (infoPort < 0 || infoServer == null) {
    return -1;
  }
  String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
  if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
    String msg =
        "Failed to start redirecting jetty server. Address " + addr
        + " does not belong to this host. Correct configuration parameter: "
        + "hbase.master.info.bindAddress";
    LOG.error(msg);
    throw new IOException(msg);
  }
  RedirectServlet.regionServerInfoPort = infoServer.getPort();
  masterJettyServer = new org.mortbay.jetty.Server();
  Connector connector = new SelectChannelConnector();
  connector.setHost(addr);
  connector.setPort(infoPort);
  masterJettyServer.addConnector(connector);
  masterJettyServer.setStopAtShutdown(true);
  Context context = new Context(masterJettyServer, "/", Context.NO_SESSIONS);
  context.addServlet(RedirectServlet.class, "/*");
  try {
    masterJettyServer.start();
  } catch (Exception e) {
    throw new IOException("Failed to start redirecting jetty server", e);
  }
  return connector.getLocalPort();
}
Example 14: stopRegionServer
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
/**
 * Stop the designated regionserver
 * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
 *   <code>example.org:1234</code>
 * @throws IOException if a remote or network exception occurs
 */
public synchronized void stopRegionServer(final String hostnamePort)
    throws IOException {
  String hostname = Addressing.parseHostname(hostnamePort);
  int port = Addressing.parsePort(hostnamePort);
  AdminService.BlockingInterface admin =
      this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
  StopServerRequest request = RequestConverter.buildStopServerRequest(
      "Called by admin client " + this.connection.toString());
  try {
    admin.stopServer(null, request);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  }
}
Example 15: parseFrom
import org.apache.hadoop.hbase.util.Addressing; // import the required package/class
/**
 * Get a ServerName from the passed in data bytes.
 * @param data Data with a serialized server name in it; can handle the old style
 *   servername where servername was host and port. Works too with data that
 *   begins with the pb 'PBUF' magic and that is then followed by a protobuf that
 *   has a serialized {@link ServerName} in it.
 * @return Returns null if <code>data</code> is null else converts passed data
 *   to a ServerName instance.
 * @throws DeserializationException
 */
public static ServerName parseFrom(final byte [] data) throws DeserializationException {
  if (data == null || data.length <= 0) return null;
  if (ProtobufUtil.isPBMagicPrefix(data)) {
    int prefixLen = ProtobufUtil.lengthOfPBMagic();
    try {
      MetaRegionServer rss =
          MetaRegionServer.PARSER.parseFrom(data, prefixLen, data.length - prefixLen);
      org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = rss.getServer();
      return valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode());
    } catch (InvalidProtocolBufferException e) {
      // A failed parse of the znode is pretty catastrophic. Rather than loop
      // retrying hoping the bad bytes will change, and rather than change
      // the signature on this method to add an IOE which will send ripples all
      // over the code base, throw a DeserializationException. This should "never"
      // happen. Fail fast if it does.
      throw new DeserializationException(e);
    }
  }
  // The str returned could be old style -- pre hbase-1502 -- which was
  // hostname and port separated by a colon rather than hostname, port and
  // startcode delimited by a ','.
  String str = Bytes.toString(data);
  int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR);
  if (index != -1) {
    // Presume it's a ServerName serialized with versioned bytes.
    return ServerName.parseVersionedServerName(data);
  }
  // Presume it's a hostname:port format.
  String hostname = Addressing.parseHostname(str);
  int port = Addressing.parsePort(str);
  return valueOf(hostname, port, -1L);
}