This article collects typical usage examples of the Java class org.apache.hadoop.hbase.ServerName. If you have been wondering what ServerName is for and how to use it, the curated class examples below may help.
The ServerName class belongs to the org.apache.hadoop.hbase package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
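Before the individual examples, here is a minimal stand-alone sketch of the ServerName basics they all build on (the host name, port, and start code are made-up values):

import org.apache.hadoop.hbase.ServerName;

public class ServerNameBasics {
  public static void main(String[] args) {
    // Build a ServerName from host name, port and start code (all values made up).
    ServerName sn = ServerName.valueOf("rs1.example.org", 16020, 1415000000000L);
    System.out.println(sn.getHostname());  // rs1.example.org
    System.out.println(sn.getPort());      // 16020
    System.out.println(sn.getStartcode()); // 1415000000000
    // The canonical string form is "hostname,port,startcode" and round-trips.
    ServerName copy = ServerName.valueOf(sn.toString());
    System.out.println(copy.equals(sn));   // true
  }
}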
Example 1: getRegionServer
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/**
 * Returns the {@link ServerName} from the catalog table {@link Result}
 * where the region is transitioning. It should match
 * {@link HRegionInfo#getServerName(Result)} once the region reaches the OPEN state.
 * @param r Result to pull the transitioning server name from
 * @return a ServerName instance, the regular region location's server name as a
 * fallback, or null if the necessary fields are not found or are empty
 */
static ServerName getRegionServer(final Result r, int replicaId) {
Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId));
if (cell == null || cell.getValueLength() == 0) {
RegionLocations locations = MetaTableAccessor.getRegionLocations(r);
if (locations != null) {
HRegionLocation location = locations.getRegionLocation(replicaId);
if (location != null) {
return location.getServerName();
}
}
return null;
}
return ServerName.parseServerName(Bytes.toString(cell.getValueArray(),
cell.getValueOffset(), cell.getValueLength()));
}
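The parsing step at the end of this example can be exercised on its own; a short sketch, using a made-up value in the form the catalog table stores ("hostname,port,startcode"):

import org.apache.hadoop.hbase.ServerName;

public class ParseServerNameSketch {
  public static void main(String[] args) {
    // Made-up cell value; in the example above it comes from Bytes.toString(...).
    String raw = "rs1.example.org,16020,1415000000000";
    ServerName sn = ServerName.parseServerName(raw);
    System.out.println(sn.getHostAndPort()); // rs1.example.org:16020
  }
}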
Example 2: prepare
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/**
 * Two responsibilities:
 * - if the call has already completed (served by another replica), stop the retries;
 * - set the location to the right region, depending on the replica id.
 */
@Override
public void prepare(final boolean reload) throws IOException {
if (controller.isCanceled()) return;
if (Thread.interrupted()) {
throw new InterruptedIOException();
}
if (reload || location == null) {
RegionLocations rl = getRegionLocations(false, id, cConnection, tableName, get.getRow());
location = id < rl.size() ? rl.getRegionLocation(id) : null;
}
if (location == null || location.getServerName() == null) {
// With this exception, there will be a retry. The location can be null for a replica
// when the table is created or after a split.
throw new HBaseIOException("There is no location for replica id #" + id);
}
ServerName dest = location.getServerName();
setStub(cConnection.getClient(dest));
}
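This replica-aware prepare() runs when a client opts into timeline-consistent reads, which is what allows a get to be served by a secondary replica. A minimal client-side sketch (the table and row names are made up):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class TimelineGetSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      Get get = new Get(Bytes.toBytes("row1"));
      get.setConsistency(Consistency.TIMELINE); // may be served by a secondary replica
      Result result = table.get(get);
      System.out.println("stale=" + result.isStale()); // true if a secondary answered
    }
  }
}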
Example 3: getSplits
import org.apache.hadoop.hbase.ServerName; // import the required package/class
@Override
public Iterator<RegionWork> getSplits(ExecutionNodeMap executionNodes) {
List<RegionWork> work = new ArrayList<>();
for (Entry<HRegionInfo, ServerName> entry : regionsToScan.entrySet()) {
long bytes = statsCalculator.getRegionSizeInBytes(entry.getKey().getRegionName());
String name = entry.getValue().getHostname();
NodeEndpoint endpoint = executionNodes.getEndpoint(name);
if(endpoint != null){
work.add(new RegionWork(entry.getKey(), bytes, new EndpointAffinity(endpoint, bytes)));
} else {
work.add(new RegionWork(entry.getKey(), bytes));
}
}
return work.iterator();
}
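The regionsToScan map consumed above is populated by the caller; one way to build such a region-to-server map with the standard client API is sketched below (assumes an open Connection and a made-up table name):

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

static Map<HRegionInfo, ServerName> regionsToScan(Connection conn) throws IOException {
  try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
    Map<HRegionInfo, ServerName> map = new LinkedHashMap<>();
    for (HRegionLocation loc : locator.getAllRegionLocations()) {
      map.put(loc.getRegionInfo(), loc.getServerName());
    }
    return map;
  }
}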
Example 4: testGetPreviousRecoveryMode
import org.apache.hadoop.hbase.ServerName; // import the required package/class
@Ignore("DLR is broken by HBASE-12751") @Test(timeout=60000)
public void testGetPreviousRecoveryMode() throws Exception {
LOG.info("testGetPreviousRecoveryMode");
SplitLogCounters.resetCounters();
// Not actually enabling DLR for the cluster, just for the ZkCoordinatedStateManager to use.
// The test just manipulates ZK manually anyway.
conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"),
new SplitLogTask.Unassigned(
ServerName.valueOf("mgr,1,1"), RecoveryMode.LOG_SPLITTING).toByteArray(),
Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
slm = new SplitLogManager(ds, conf, stopper, master, DUMMY_MASTER);
LOG.info("Mode1=" + slm.getRecoveryMode());
assertTrue(slm.isLogSplitting());
zkw.getRecoverableZooKeeper().delete(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"), -1);
LOG.info("Mode2=" + slm.getRecoveryMode());
slm.setRecoveryMode(false);
LOG.info("Mode3=" + slm.getRecoveryMode());
assertTrue("Mode4=" + slm.getRecoveryMode(), slm.isLogReplaying());
}
Example 5: getDeployedHRIs
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/**
 * Get region info from the local cluster, keyed by region server.
 */
Map<ServerName, List<String>> getDeployedHRIs(final HBaseAdmin admin) throws IOException {
ClusterStatus status = admin.getClusterStatus();
Collection<ServerName> regionServers = status.getServers();
Map<ServerName, List<String>> mm =
new HashMap<ServerName, List<String>>();
for (ServerName hsi : regionServers) {
AdminProtos.AdminService.BlockingInterface server = ((HConnection) connection).getAdmin(hsi);
// list all online regions from this region server
List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
List<String> regionNames = new ArrayList<String>();
for (HRegionInfo hri : regions) {
regionNames.add(hri.getRegionNameAsString());
}
mm.put(hsi, regionNames);
}
return mm;
}
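The traversal above starts from the cluster status; the same server list can be obtained with nothing but the public client API, sketched here (configuration is picked up from the classpath):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListServersSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      for (ServerName sn : admin.getClusterStatus().getServers()) {
        System.out.println(sn.getServerName()); // "hostname,port,startcode"
      }
    }
  }
}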
Example 6: closeRegion
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/**
 * A helper to close a region, given its region name, using the admin protocol.
 *
 * @param controller RPC controller for the call
 * @param admin admin service stub of the region server hosting the region
 * @param server server the region currently lives on
 * @param regionName name of the region to close
 * @param versionOfClosingNode expected version of the closing znode
 * @param destinationServer server the region is being moved to, if any
 * @param transitionInZK whether to transition the region state in ZooKeeper
 * @return true if the region is closed
 * @throws IOException
 */
public static boolean closeRegion(final RpcController controller,
final AdminService.BlockingInterface admin,
final ServerName server,
final byte[] regionName,
final int versionOfClosingNode, final ServerName destinationServer,
final boolean transitionInZK) throws IOException {
CloseRegionRequest closeRegionRequest =
RequestConverter.buildCloseRegionRequest(server,
regionName, versionOfClosingNode, destinationServer, transitionInZK);
try {
CloseRegionResponse response = admin.closeRegion(controller, closeRegionRequest);
return ResponseConverter.isClosed(response);
} catch (ServiceException se) {
throw getRemoteException(se);
}
}
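The helper above speaks the low-level admin protocol directly. For comparison, the HBase 1.x Admin interface exposes a higher-level closeRegion call (deprecated and removed in later releases); a hedged sketch where the names are placeholders:

// Assumes `admin` is an open org.apache.hadoop.hbase.client.Admin and `sn` the
// ServerName hosting the region; HBase 1.x API, deprecated in later versions.
admin.closeRegion("regionNameOrEncodedName", sn.getServerName());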
Example 7: getCurrentServers
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/** Returns the current region servers, excluding the active master. */
protected ServerName[] getCurrentServers() throws IOException {
ClusterStatus clusterStatus = cluster.getClusterStatus();
Collection<ServerName> regionServers = clusterStatus.getServers();
int count = regionServers == null ? 0 : regionServers.size();
if (count <= 0) {
return new ServerName [] {};
}
ServerName master = clusterStatus.getMaster();
if (master == null || !regionServers.contains(master)) {
return regionServers.toArray(new ServerName[count]);
}
if (count == 1) {
return new ServerName [] {};
}
ArrayList<ServerName> tmp = new ArrayList<ServerName>(count);
tmp.addAll(regionServers);
tmp.remove(master);
return tmp.toArray(new ServerName[count-1]);
}
Example 8: regionsToAssignWithServerName
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/**
 * @param regionsInMeta regions currently listed in meta, with their last-known server names
 * @return Map of regions (to server name) that are neither in transition nor assigned.
 * @throws IOException
 */
private Map<HRegionInfo, ServerName> regionsToAssignWithServerName(
final List<Pair<HRegionInfo, ServerName>> regionsInMeta) throws IOException {
Map<HRegionInfo, ServerName> regionsToAssign =
new HashMap<HRegionInfo, ServerName>(regionsInMeta.size());
RegionStates regionStates = this.assignmentManager.getRegionStates();
for (Pair<HRegionInfo, ServerName> regionLocation : regionsInMeta) {
HRegionInfo hri = regionLocation.getFirst();
ServerName sn = regionLocation.getSecond();
if (regionStates.isRegionOffline(hri)) {
regionsToAssign.put(hri, sn);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Skipping assign for the region " + hri + " during enable table "
+ hri.getTable() + " because its already in tranition or assigned.");
}
}
}
return regionsToAssign;
}
Example 9: startSplitTransaction
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/**
 * Creates a new ephemeral node in the PENDING_SPLIT state for the specified region.
 * The node is ephemeral so that it is cleaned up if the region server dies mid-split.
 * <p>
 * Does not transition nodes from other states. If a node already exists for this region,
 * an Exception will be thrown.
 * @param parent the region being split
 * @param serverName server the event originates from
 * @param hri_a first daughter region
 * @param hri_b second daughter region
 * @throws IOException
 */
@Override
public void startSplitTransaction(HRegion parent, ServerName serverName, HRegionInfo hri_a,
HRegionInfo hri_b) throws IOException {
HRegionInfo region = parent.getRegionInfo();
try {
LOG.debug(watcher.prefix("Creating ephemeral node for " + region.getEncodedName()
+ " in PENDING_SPLIT state"));
byte[] payload = HRegionInfo.toDelimitedByteArray(hri_a, hri_b);
RegionTransition rt =
RegionTransition.createRegionTransition(RS_ZK_REQUEST_REGION_SPLIT,
region.getRegionName(), serverName, payload);
String node = ZKAssign.getNodeName(watcher, region.getEncodedName());
if (!ZKUtil.createEphemeralNodeAndWatch(watcher, node, rt.toByteArray())) {
throw new IOException("Failed create of ephemeral " + node);
}
} catch (KeeperException e) {
throw new IOException("Failed creating PENDING_SPLIT znode on "
+ parent.getRegionInfo().getRegionNameAsString(), e);
}
}
Example 10: SplitLogManager
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/**
 * It's OK to construct this object even when region servers are not online. It looks up
 * orphan tasks in the coordination engine but does not block waiting for them to be done.
 * @param server the server instance
 * @param conf the HBase configuration
 * @param stopper the stoppable, in case anything goes wrong
 * @param master the master services
 * @param serverName the master server name
 * @throws IOException
 */
public SplitLogManager(Server server, Configuration conf, Stoppable stopper,
MasterServices master, ServerName serverName) throws IOException {
this.server = server;
this.conf = conf;
this.stopper = stopper;
this.choreService = new ChoreService(serverName.toString() + "_splitLogManager_");
if (server.getCoordinatedStateManager() != null) {
SplitLogManagerCoordination coordination =
((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
.getSplitLogManagerCoordination();
Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
SplitLogManagerDetails details =
new SplitLogManagerDetails(tasks, master, failedDeletions, serverName);
coordination.setDetails(details);
coordination.init();
// Determine recovery mode
}
this.unassignedTimeout =
conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT);
this.timeoutMonitor =
new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000),
stopper);
choreService.scheduleChore(timeoutMonitor);
}
Example 11: updateMetaWithFavoredNodesInfo
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/**
 * Update the meta table with favored nodes info.
 * @param regionToFavoredNodes map from region to its favored nodes
 * @param conf configuration used to create the connection
 * @throws IOException
 */
public static void updateMetaWithFavoredNodesInfo(
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
Configuration conf) throws IOException {
List<Put> puts = new ArrayList<Put>();
for (Map.Entry<HRegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
Put put = makePutFromRegionInfo(entry.getKey(), entry.getValue());
if (put != null) {
puts.add(put);
}
}
// Write the region assignments to the meta table.
// TODO: The overrides above take a Connection rather than a Configuration, but only when the
// Connection is a short-circuit connection. That is not going to be good in all cases, e.g. when
// master and meta are not colocated. Fix when this favored nodes feature is actually used
// someday.
try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
metaTable.put(puts);
}
}
LOG.info("Added " + puts.size() + " regions in META");
}
Example 12: testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks
import org.apache.hadoop.hbase.ServerName; // import the required package/class
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test
public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() {
// Test the case where we have two racks but with less than two servers in each
// We will not have enough machines to select secondary/tertiary
Map<String,Integer> rackToServerCount = new HashMap<String,Integer>();
rackToServerCount.put("rack1", 1);
rackToServerCount.put("rack2", 1);
Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>>
primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
List<HRegionInfo> regions = primaryRSMapAndHelper.getThird();
assertTrue(primaryRSMap.size() == 6);
Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap =
helper.placeSecondaryAndTertiaryRS(primaryRSMap);
for (HRegionInfo region : regions) {
// not enough secondary/tertiary room to place the regions
assertTrue(secondaryAndTertiaryMap.get(region) == null);
}
}
Example 13: getLeastLoadedTopServerForRegion
import org.apache.hadoop.hbase.ServerName; // import the required package/class
int getLeastLoadedTopServerForRegion(int region) {
if (regionFinder != null) {
List<ServerName> topLocalServers = regionFinder.getTopBlockLocations(regions[region]);
int leastLoadedServerIndex = -1;
int load = Integer.MAX_VALUE;
for (ServerName sn : topLocalServers) {
if (!serversToIndex.containsKey(sn.getHostAndPort())) {
continue;
}
int index = serversToIndex.get(sn.getHostAndPort());
if (regionsPerServer[index] == null) {
continue;
}
int tempLoad = regionsPerServer[index].length;
if (tempLoad <= load) {
leastLoadedServerIndex = index;
load = tempLoad;
}
}
return leastLoadedServerIndex;
} else {
return -1;
}
}
Example 14: checkIsDead
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/**
* If this server is on the dead list, reject it with a YouAreDeadException.
* If it was dead but came back with a new start code, remove the old entry
* from the dead list.
* @param serverName
* @param what START or REPORT
* @throws org.apache.hadoop.hbase.YouAreDeadException
*/
private void checkIsDead(final ServerName serverName, final String what)
throws YouAreDeadException {
if (this.deadservers.isDeadServer(serverName)) {
// host name, port and start code all match an existing entry in the
// dead servers list. So, this server must be dead.
String message = "Server " + what + " rejected; currently processing " +
serverName + " as dead server";
LOG.debug(message);
throw new YouAreDeadException(message);
}
// remove a dead server with the same host name and port as the newly checking-in RS,
// after master initialization. See HBASE-5916 for more information.
if ((this.services == null || ((HMaster) this.services).isInitialized())
&& this.deadservers.cleanPreviousInstance(serverName)) {
// This server has now become alive after we marked it as dead.
// We removed its previous entry from the dead list to reflect that.
LOG.debug(what + ":" + " Server " + serverName + " came back up," +
" removed it from the dead servers list");
}
}
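The "came back with a new start code" case works because the start code participates in ServerName equality, while host/port comparison is a separate helper; a small sketch with made-up values:

import org.apache.hadoop.hbase.ServerName;

public class DeadServerEqualitySketch {
  public static void main(String[] args) {
    ServerName died  = ServerName.valueOf("rs1.example.org", 16020, 1L);
    ServerName fresh = ServerName.valueOf("rs1.example.org", 16020, 2L); // restarted: new start code
    System.out.println(died.equals(fresh));                              // false
    System.out.println(ServerName.isSameHostnameAndPort(died, fresh));   // true
  }
}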
Example 15: allTableRegions
import org.apache.hadoop.hbase.ServerName; // import the required package/class
/**
* Lists all of the table regions currently in META.
* @param connection
* @param tableName
* @return Map of all user-space regions to servers
* @throws IOException
*/
public static NavigableMap<HRegionInfo, ServerName> allTableRegions(
Connection connection, final TableName tableName) throws IOException {
final NavigableMap<HRegionInfo, ServerName> regions =
new TreeMap<HRegionInfo, ServerName>();
MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) {
@Override
public boolean processRowInternal(Result result) throws IOException {
RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
if (locations == null) return true;
for (HRegionLocation loc : locations.getRegionLocations()) {
if (loc != null) {
HRegionInfo regionInfo = loc.getRegionInfo();
regions.put(new UnmodifyableHRegionInfo(regionInfo), loc.getServerName());
}
}
return true;
}
};
metaScan(connection, visitor, tableName);
return regions;
}
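A usage sketch for this helper, assuming it is the HBase 1.x MetaScanner.allTableRegions (the table name is made up and `connection` is an open Connection):

import java.util.Map;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.MetaScanner;

NavigableMap<HRegionInfo, ServerName> regions =
    MetaScanner.allTableRegions(connection, TableName.valueOf("t1"));
for (Map.Entry<HRegionInfo, ServerName> e : regions.entrySet()) {
  System.out.println(e.getKey().getRegionNameAsString() + " -> " + e.getValue());
}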