This article collects typical usage examples of the Java method org.apache.hadoop.hbase.ServerName.getPort. If you have been wondering what ServerName.getPort does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.ServerName.
The following presents 11 code examples of ServerName.getPort, ordered by popularity.
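Before diving in, here is a minimal, self-contained sketch of the method itself. The hostname, port, and start code are illustrative values; ServerName.valueOf is the standard factory in HBase 0.96 and later:

import org.apache.hadoop.hbase.ServerName;

public class GetPortDemo {
  public static void main(String[] args) {
    // A ServerName encodes a hostname, a port, and the server's start timestamp.
    ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1424379958283L);
    System.out.println(sn.getHostname());  // rs1.example.com
    System.out.println(sn.getPort());      // 16020
    System.out.println(sn.getStartcode()); // 1424379958283
  }
}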
Example 1: isDeadServer
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
/**
 * Check if we know if a server is dead.
 *
 * @param sn the server name to check.
 * @return true if we know for sure that the server is dead, false otherwise.
 */
public boolean isDeadServer(ServerName sn) {
  if (sn.getStartcode() <= 0) {
    return false;
  }

  for (ServerName dead : deadServers) {
    if (dead.getStartcode() >= sn.getStartcode() &&
        dead.getPort() == sn.getPort() &&
        dead.getHostname().equals(sn.getHostname())) {
      return true;
    }
  }

  return false;
}
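A hedged sketch of the rule encoded in the loop above, with illustrative start codes. A restarted region server keeps its host:port but gets a fresh, larger start code, so a dead-list entry with an equal or newer start code proves the queried instance is gone:

ServerName queried = ServerName.valueOf("rs1.example.com", 16020, 100L);
ServerName deadEntry = ServerName.valueOf("rs1.example.com", 16020, 200L);
// Same host and port, and deadEntry's start code (200) >= queried's (100),
// so isDeadServer(queried) would return true.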
Example 2: cancelConnections
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
/**
 * Interrupt the connections to the given ip:port server. This should be called if the server
 * is known to be dead. This will not prevent current operations from being retried, and,
 * depending on their own behavior, they may retry on the same server. This can be a feature,
 * for example at startup. In any case, they're likely to get a connection refused (if the
 * process died) or no route to host: i.e. their next retries should be faster and fail with a
 * safe exception.
 */
@Override
public void cancelConnections(ServerName sn) {
  synchronized (connections) {
    for (Connection connection : connections.values()) {
      if (connection.isAlive() &&
          connection.getRemoteAddress().getPort() == sn.getPort() &&
          connection.getRemoteAddress().getHostName().equals(sn.getHostname())) {
        LOG.info("The server on " + sn.toString() +
            " is dead - stopping the connection " + connection.remoteId);
        connection.interrupt(); // We're interrupting a Reader. It means we want it to finish.
                                // This will close the connection as well.
      }
    }
  }
}
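A hedged usage sketch tying this to Example 1: once a listener reports a server dead, a client can drop its live connections immediately instead of waiting for timeouts. clusterStatusListener and rpcClient are assumed fields on the enclosing connection object:

if (clusterStatusListener.isDeadServer(sn)) {
  rpcClient.cancelConnections(sn); // interrupt any live connections to the dead host:port
}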
Example 3: getRegionInfo
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
@Override
public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError {
  try {
    byte[] row = getBytes(searchRow);
    Result startRowResult =
        getRowOrBefore(TableName.META_TABLE_NAME.getName(), row, HConstants.CATALOG_FAMILY);
    if (startRowResult == null) {
      throw new IOException("Cannot find row in " + TableName.META_TABLE_NAME + ", row="
          + Bytes.toStringBinary(row));
    }
    // find region start and end keys
    HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(startRowResult);
    if (regionInfo == null) {
      throw new IOException("HRegionInfo REGIONINFO was null or empty in Meta for row="
          + Bytes.toStringBinary(row));
    }
    TRegionInfo region = new TRegionInfo();
    region.setStartKey(regionInfo.getStartKey());
    region.setEndKey(regionInfo.getEndKey());
    region.id = regionInfo.getRegionId();
    region.setName(regionInfo.getRegionName());
    region.version = regionInfo.getVersion();
    // find region assignment to server
    ServerName serverName = HRegionInfo.getServerName(startRowResult);
    if (serverName != null) {
      region.setServerName(Bytes.toBytes(serverName.getHostname()));
      region.port = serverName.getPort();
    }
    return region;
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  }
}
Example 4: toServerName
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
/**
 * Convert a ServerName to a protocol buffer ServerName
 *
 * @param serverName the ServerName to convert
 * @return the converted protocol buffer ServerName
 * @see #toServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName)
 */
public static HBaseProtos.ServerName toServerName(final ServerName serverName) {
  if (serverName == null) return null;
  HBaseProtos.ServerName.Builder builder = HBaseProtos.ServerName.newBuilder();
  builder.setHostName(serverName.getHostname());
  if (serverName.getPort() >= 0) {
    builder.setPort(serverName.getPort());
  }
  if (serverName.getStartcode() >= 0) {
    builder.setStartCode(serverName.getStartcode());
  }
  return builder.build();
}
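A minimal round-trip sketch, assuming the companion toServerName(HBaseProtos.ServerName) converter referenced in the @see tag above (in HBase both directions live in ProtobufUtil); the ServerName values are illustrative:

ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1424379958283L);
HBaseProtos.ServerName pb = toServerName(sn);            // ServerName -> protobuf
ServerName roundTripped = ProtobufUtil.toServerName(pb); // protobuf -> ServerName
assert roundTripped.getPort() == sn.getPort();           // the port survives the round trip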
Example 5: cancelConnections
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
/**
 * Interrupt the connections to the given ip:port server. This should be called if the server
 * is known to be dead. This will not prevent current operations from being retried, and,
 * depending on their own behavior, they may retry on the same server. This can be a feature,
 * for example at startup. In any case, they're likely to get a connection refused (if the
 * process died) or no route to host: i.e. their next retries should be faster and fail with a
 * safe exception.
 *
 * @param sn server to cancel connections for
 */
@Override
public void cancelConnections(ServerName sn) {
  synchronized (connections) {
    for (AsyncRpcChannel rpcChannel : connections.values()) {
      if (rpcChannel.isAlive() &&
          rpcChannel.address.getPort() == sn.getPort() &&
          rpcChannel.address.getHostName().contentEquals(sn.getHostname())) {
        LOG.info("The server on " + sn.toString() +
            " is dead - stopping the connection " + rpcChannel.toString());
        rpcChannel.close(null);
      }
    }
  }
}
Example 6: RpcChannelImplementation
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
/**
 * @param channelOperationTimeout - the default timeout when no timeout is given
 */
protected RpcChannelImplementation(final AsyncRpcClient rpcClient,
    final ServerName sn, final User ticket, int channelOperationTimeout) {
  this.isa = new InetSocketAddress(sn.getHostname(), sn.getPort());
  this.rpcClient = rpcClient;
  this.ticket = ticket;
  this.channelOperationTimeout = channelOperationTimeout;
}
Example 7: BlockingRpcChannelImplementation
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
/**
 * @param channelOperationTimeout - the default timeout when no timeout is given
 */
protected BlockingRpcChannelImplementation(final AbstractRpcClient rpcClient,
    final ServerName sn, final User ticket, int channelOperationTimeout) {
  this.isa = new InetSocketAddress(sn.getHostname(), sn.getPort());
  this.rpcClient = rpcClient;
  this.ticket = ticket;
  this.channelOperationTimeout = channelOperationTimeout;
}
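Examples 6 and 7 show the common pattern of reducing a ServerName to a plain socket address: the start code identifies a server instance logically, but only the hostname and port matter at the transport layer. A minimal sketch with illustrative values:

ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1424379958283L);
InetSocketAddress isa = new InetSocketAddress(sn.getHostname(), sn.getPort());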
Example 8: checkTableInfo
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
void checkTableInfo(TableInfoModel model) {
  assertEquals(model.getName(), TABLE.getNameAsString());
  Iterator<TableRegionModel> regions = model.getRegions().iterator();
  assertTrue(regions.hasNext());
  while (regions.hasNext()) {
    TableRegionModel region = regions.next();
    boolean found = false;
    for (HRegionLocation e: regionMap) {
      HRegionInfo hri = e.getRegionInfo();
      String hriRegionName = hri.getRegionNameAsString();
      String regionName = region.getName();
      if (hriRegionName.equals(regionName)) {
        found = true;
        byte[] startKey = hri.getStartKey();
        byte[] endKey = hri.getEndKey();
        ServerName serverName = e.getServerName();
        InetSocketAddress sa =
            new InetSocketAddress(serverName.getHostname(), serverName.getPort());
        String location = sa.getHostName() + ":" + Integer.valueOf(sa.getPort());
        assertEquals(hri.getRegionId(), region.getId());
        assertTrue(Bytes.equals(startKey, region.getStartKey()));
        assertTrue(Bytes.equals(endKey, region.getEndKey()));
        assertEquals(location, region.getLocation());
        break;
      }
    }
    assertTrue(found);
  }
}
Example 9: testClusterStatus
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
@Ignore @Test (expected = RegionServerStoppedException.class)
public void testClusterStatus() throws Exception {
  TableName tn = TableName.valueOf("testClusterStatus");
  byte[] cf = "cf".getBytes();
  byte[] rk = "rk1".getBytes();

  JVMClusterUtil.RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer();
  rs.waitForServerOnline();
  final ServerName sn = rs.getRegionServer().getServerName();

  HTable t = TEST_UTIL.createTable(tn, cf);
  TEST_UTIL.waitTableAvailable(tn);

  while (TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
      .getRegionStates().isRegionsInTransition()) {
    Thread.sleep(1);
  }
  final HConnectionImplementation hci = (HConnectionImplementation) t.getConnection();
  while (t.getRegionLocation(rk).getPort() != sn.getPort()) {
    TEST_UTIL.getHBaseAdmin().move(t.getRegionLocation(rk).getRegionInfo()
        .getEncodedNameAsBytes(), Bytes.toBytes(sn.toString()));
    while (TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
        .getRegionStates().isRegionsInTransition()) {
      Thread.sleep(1);
    }
    hci.clearRegionCache(tn);
  }
  Assert.assertNotNull(hci.clusterStatusListener);
  TEST_UTIL.assertRegionOnServer(t.getRegionLocation(rk).getRegionInfo(), sn, 20000);

  Put p1 = new Put(rk);
  p1.add(cf, "qual".getBytes(), "val".getBytes());
  t.put(p1);

  rs.getRegionServer().abort("I'm dead");

  // We want the status to be updated. That's at least 10 seconds.
  TEST_UTIL.waitFor(40000, 1000, true, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return TEST_UTIL.getHBaseCluster().getMaster().getServerManager()
          .getDeadServers().isDeadServer(sn);
    }
  });

  TEST_UTIL.waitFor(40000, 1000, true, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return hci.clusterStatusListener.isDeadServer(sn);
    }
  });

  t.close();
  hci.getClient(sn); // will throw an exception: RegionServerStoppedException
}
Example 10: testNonCachedGetRegionLocation
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
@Test
/**
 * Tests the non-cached version of getRegionLocation by moving a region.
 */
public void testNonCachedGetRegionLocation() throws Exception {
  // Test Initialization.
  TableName TABLE = TableName.valueOf("testNonCachedGetRegionLocation");
  byte[] family1 = Bytes.toBytes("f1");
  byte[] family2 = Bytes.toBytes("f2");
  try (HTable table = TEST_UTIL.createTable(TABLE, new byte[][] {family1, family2}, 10);
      Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration())) {
    Map<HRegionInfo, ServerName> regionsMap = table.getRegionLocations();
    assertEquals(1, regionsMap.size());
    HRegionInfo regionInfo = regionsMap.keySet().iterator().next();
    ServerName addrBefore = regionsMap.get(regionInfo);
    // Verify region location before move.
    HRegionLocation addrCache = table.getRegionLocation(regionInfo.getStartKey(), false);
    HRegionLocation addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true);

    assertEquals(addrBefore.getPort(), addrCache.getPort());
    assertEquals(addrBefore.getPort(), addrNoCache.getPort());

    ServerName addrAfter = null;
    // Now move the region to a different server.
    for (int i = 0; i < SLAVES; i++) {
      HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(i);
      ServerName addr = regionServer.getServerName();
      if (addr.getPort() != addrBefore.getPort()) {
        admin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(addr.toString()));
        // Wait for the region to move.
        Thread.sleep(5000);
        addrAfter = addr;
        break;
      }
    }

    // Verify the region was moved.
    addrCache = table.getRegionLocation(regionInfo.getStartKey(), false);
    addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true);
    assertNotNull(addrAfter);
    assertTrue(addrAfter.getPort() != addrCache.getPort());
    assertEquals(addrAfter.getPort(), addrNoCache.getPort());
  }
}
Example 11: RegionMovedException
import org.apache.hadoop.hbase.ServerName; // import the package/class the method depends on
public RegionMovedException(ServerName serverName, long locationSeqNum) {
  this.hostname = serverName.getHostname();
  this.port = serverName.getPort();
  this.startCode = serverName.getStartcode();
  this.locationSeqNum = locationSeqNum;
}
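A hedged sketch of constructing the exception, with an illustrative destination server and location sequence number; the stored hostname, port, and start code let a retrying client redirect straight to the region's new location:

ServerName destination = ServerName.valueOf("rs2.example.com", 16020, 1424379958283L);
RegionMovedException rme = new RegionMovedException(destination, 7L); // 7L: illustrative seq number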