本文整理汇总了Java中org.apache.hadoop.hbase.master.RegionPlan类的典型用法代码示例。如果您正苦于以下问题:Java RegionPlan类的具体用法?Java RegionPlan怎么用?Java RegionPlan使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
RegionPlan类属于org.apache.hadoop.hbase.master包,在下文中一共展示了RegionPlan类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: createRegionPlans
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Build the list of RegionPlan moves that transforms the initial region
 * layout recorded in the cluster snapshot into its computed target layout.
 *
 * @param cluster snapshot holding both the initial and the target
 *                region-index-to-server-index assignments
 * @return one RegionPlan per region whose target server differs from its
 *         initial server; empty if no region needs to move
 */
private List<RegionPlan> createRegionPlans(Cluster cluster) {
  List<RegionPlan> plans = new LinkedList<RegionPlan>();
  int regionCount = cluster.regionIndexToServerIndex.length;
  for (int i = 0; i < regionCount; i++) {
    int fromIndex = cluster.initialRegionIndexToServerIndex[i];
    int toIndex = cluster.regionIndexToServerIndex[i];
    if (fromIndex == toIndex) {
      continue; // region stays on its server; no plan needed
    }
    HRegionInfo region = cluster.regions[i];
    ServerName fromServer = cluster.servers[fromIndex];
    ServerName toServer = cluster.servers[toIndex];
    if (LOG.isTraceEnabled()) {
      LOG.trace("Moving Region " + region.getEncodedName() + " from server "
          + fromServer.getHostname() + " to " + toServer.getHostname());
    }
    plans.add(new RegionPlan(region, fromServer, toServer));
  }
  return plans;
}
示例2: testBalanceCluster
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Runs the load balancer against every mocked cluster layout and checks
 * the balancing invariant: afterwards each server hosts either
 * floor(average) or ceiling(average) regions.
 *
 * @throws Exception if balancing or verification fails
 */
@Test (timeout=60000)
public void testBalanceCluster() throws Exception {
  for (int[] mockCluster : clusterStateMocks) {
    Map<ServerName, List<HRegionInfo>> serverMap = mockClusterServers(mockCluster);
    List<ServerAndLoad> initialLoads = convertToList(serverMap);
    LOG.info("Mock Cluster : " + printMock(initialLoads) + " " + printStats(initialLoads));
    List<RegionPlan> movePlans = loadBalancer.balanceCluster(serverMap);
    List<ServerAndLoad> balancedLoads = reconcile(initialLoads, movePlans, serverMap);
    LOG.info("Mock Balance : " + printMock(balancedLoads));
    assertClusterAsBalanced(balancedLoads);
    // Release the mock servers/regions back to their pools for the next layout.
    for (ServerName server : serverMap.keySet()) {
      returnRegions(serverMap.get(server));
      returnServer(server);
    }
  }
}
示例3: testBalanceCluster
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Runs the load balancer against every mocked cluster layout and checks
 * two things: (1) the balancing invariant — afterwards each server hosts
 * either floor(average) or ceiling(average) regions — and (2) that a
 * second balancing pass on the already-balanced cluster produces no plans.
 *
 * @throws Exception if balancing or verification fails
 */
@Test
public void testBalanceCluster() throws Exception {
  for (int[] mockCluster : clusterStateMocks) {
    Map<ServerName, List<HRegionInfo>> serverMap = mockClusterServers(mockCluster);
    List<ServerAndLoad> initialLoads = convertToList(serverMap);
    LOG.info("Mock Cluster : " + printMock(initialLoads) + " " + printStats(initialLoads));
    List<RegionPlan> movePlans = loadBalancer.balanceCluster(serverMap);
    List<ServerAndLoad> balancedLoads = reconcile(initialLoads, movePlans, serverMap);
    LOG.info("Mock Balance : " + printMock(balancedLoads));
    assertClusterAsBalanced(balancedLoads);
    // An already-balanced cluster must yield no further plans.
    List<RegionPlan> secondPlans = loadBalancer.balanceCluster(serverMap);
    assertNull(secondPlans);
    // Release the mock servers/regions back to their pools for the next layout.
    for (ServerName server : serverMap.keySet()) {
      returnRegions(serverMap.get(server));
      returnServer(server);
    }
  }
}
示例4: reconcile
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Applies each RegionPlan to the per-server load counts and to the
 * per-server region lists, returning the resulting loads.
 *
 * This assumes the RegionPlan server instances are the same ones keyed in
 * the map, so actually no need to even pass in the map, but it's clearer.
 *
 * @param list baseline per-server loads before the moves
 * @param plans moves to apply; may be null (no moves)
 * @param servers per-server region lists, mutated in place to reflect the moves
 * @return the per-server loads after all plans have been applied
 */
protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list,
                                        List<RegionPlan> plans,
                                        Map<ServerName, List<HRegionInfo>> servers) {
  Map<ServerName, ServerAndLoad> map = new HashMap<ServerName, ServerAndLoad>(list.size());
  for (ServerAndLoad sl : list) {
    map.put(sl.getServerName(), sl);
  }
  if (plans != null) {
    for (RegionPlan plan : plans) {
      ServerName source = plan.getSource();
      updateLoad(map, source, -1);
      ServerName destination = plan.getDestination();
      updateLoad(map, destination, +1);
      // Mirror the move in the region lists themselves.
      servers.get(source).remove(plan.getRegionInfo());
      servers.get(destination).add(plan.getRegionInfo());
    }
  }
  // Originally the result list was pre-allocated and then clear()ed while
  // still empty (dead code); build it directly from the map values instead.
  return new ArrayList<ServerAndLoad>(map.values());
}
示例5: testBalanceCluster
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Runs the load balancer against every mocked cluster layout and checks
 * the balancing invariant: afterwards each server hosts either
 * floor(average) or ceiling(average) regions.
 *
 * @throws Exception if balancing or verification fails
 */
@Test
public void testBalanceCluster() throws Exception {
  for (int[] mockCluster : clusterStateMocks) {
    Map<ServerName, List<HRegionInfo>> serverMap = mockClusterServers(mockCluster);
    List<ServerAndLoad> initialLoads = convertToList(serverMap);
    LOG.info("Mock Cluster : " + printMock(initialLoads) + " " + printStats(initialLoads));
    List<RegionPlan> movePlans = loadBalancer.balanceCluster(serverMap);
    List<ServerAndLoad> balancedLoads = reconcile(initialLoads, movePlans, serverMap);
    LOG.info("Mock Balance : " + printMock(balancedLoads));
    assertClusterAsBalanced(balancedLoads);
    // Release the mock servers/regions back to their pools for the next layout.
    for (ServerName server : serverMap.keySet()) {
      returnRegions(serverMap.get(server));
      returnServer(server);
    }
  }
}
示例6: reconcile
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Applies each RegionPlan to the per-server load counts and to the
 * per-server region lists, returning the resulting loads.
 *
 * This assumes the RegionPlan server instances are the same ones keyed in
 * the map, so actually no need to even pass in the map, but it's clearer.
 *
 * Note: the previous version returned an EMPTY list when {@code plans} was
 * null, silently discarding the baseline loads and making any downstream
 * balance assertion vacuous. It now returns the unchanged baseline loads,
 * consistent with the other reconcile implementations.
 *
 * @param list baseline per-server loads before the moves
 * @param plans moves to apply; may be null (no moves)
 * @param servers per-server region lists, mutated in place to reflect the moves
 * @return the per-server loads after all plans have been applied
 */
protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list,
                                        List<RegionPlan> plans,
                                        Map<ServerName, List<HRegionInfo>> servers) {
  Map<ServerName, ServerAndLoad> map = new HashMap<ServerName, ServerAndLoad>(list.size());
  for (ServerAndLoad sl : list) {
    map.put(sl.getServerName(), sl);
  }
  if (plans != null) {
    for (RegionPlan plan : plans) {
      ServerName source = plan.getSource();
      updateLoad(map, source, -1);
      ServerName destination = plan.getDestination();
      updateLoad(map, destination, +1);
      // Mirror the move in the region lists themselves.
      servers.get(source).remove(plan.getRegionInfo());
      servers.get(destination).add(plan.getRegionInfo());
    }
  }
  return new ArrayList<ServerAndLoad>(map.values());
}
示例7: makePlan
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Computes the follow-up region moves the stochastic balancer would make
 * after the given plan has (hypothetically) been applied to the cluster.
 *
 * @param admin admin handle used to read the current assignments
 * @param newRegionPlan moves to overlay on the current assignment snapshot
 * @return balancer-proposed plans for the resulting cluster state
 * @throws IOException if the cluster state cannot be read
 */
public static List<RegionPlan> makePlan(HBaseAdmin admin, List<RegionPlan> newRegionPlan) throws IOException {
  // Snapshot the current assignment, then overlay the requested moves.
  Map<HRegionInfo, ServerName> assignment = createRegionAssignmentMap(admin);
  for (RegionPlan plan : newRegionPlan) {
    assignment.put(plan.getRegionInfo(), plan.getDestination());
  }
  // Rebuild the per-server view of that hypothetical cluster state.
  Map<ServerName, List<HRegionInfo>> clusterState = initializeRegionMap(admin);
  for (Map.Entry<HRegionInfo, ServerName> entry : assignment.entrySet()) {
    clusterState.get(entry.getValue()).add(entry.getKey());
  }
  // Ask a stochastic balancer what it would do with that state.
  StochasticLoadBalancer balancer = new StochasticLoadBalancer();
  Configuration conf = admin.getConfiguration();
  conf.setFloat("hbase.regions.slop", 0.2f);
  balancer.setConf(conf);
  return balancer.balanceCluster(clusterState);
}
示例8: preview
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Previews the pending moves and asks the operator whether to proceed.
 *
 * @param regionPlanList plans to preview; populated/inspected by the preview balance pass
 * @param asynchronous passed through to the preview balance pass
 * @return true to proceed (forced, or operator confirmed); false otherwise
 * @throws IOException if cluster state cannot be read
 * @throws InterruptedException if the confirmation prompt is interrupted
 */
@SuppressWarnings("SimplifiableIfStatement")
private boolean preview(List<RegionPlan> regionPlanList, boolean asynchronous) throws IOException, InterruptedException {
  if (args.isForceProceed()) {
    return true; // operator opted out of the interactive confirmation
  }
  balance(args, regionPlanList, Phase.PREVIEW, asynchronous);
  if (regionPlanList.size() > 0) {
    System.out.println(regionPlanList.size() + " of " + getRegionAssignmentMap(admin, tableNameSet).size() + " region(s) will be moved.");
    warnBalanceAgain(regionPlanList);
    return Util.askProceed();
  }
  System.out.println("There is no region to move.");
  return false;
}
示例9: createRegionPlans
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Build the list of RegionPlan moves that transforms the initial region
 * layout recorded in the cluster snapshot into its computed target layout.
 *
 * @param cluster snapshot holding both the initial and the target
 *                region-index-to-server-index assignments
 * @return one RegionPlan per region whose target server differs from its
 *         initial server; empty if no region needs to move
 */
private List<RegionPlan> createRegionPlans(Cluster cluster) {
  List<RegionPlan> plans = new LinkedList<>();
  int regionCount = cluster.regionIndexToServerIndex.length;
  for (int i = 0; i < regionCount; i++) {
    int fromIndex = cluster.initialRegionIndexToServerIndex[i];
    int toIndex = cluster.regionIndexToServerIndex[i];
    if (fromIndex == toIndex) {
      continue; // region stays on its server; no plan needed
    }
    RegionInfo region = cluster.regions[i];
    ServerName fromServer = cluster.servers[fromIndex];
    ServerName toServer = cluster.servers[toIndex];
    if (LOG.isTraceEnabled()) {
      LOG.trace("Moving Region " + region.getEncodedName() + " from server "
          + fromServer.getHostname() + " to " + toServer.getHostname());
    }
    plans.add(new RegionPlan(region, fromServer, toServer));
  }
  return plans;
}
示例10: createMoveRegionProcedure
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Wraps a RegionPlan in a MoveRegionProcedure. For system-table regions,
 * a destination that is on the excluded-server list (not running the
 * highest version) is first replaced by a random assignment among the
 * non-excluded servers.
 *
 * @param plan the move to execute; its destination may be rewritten in place
 * @return the procedure that will carry out the (possibly re-targeted) move
 */
public MoveRegionProcedure createMoveRegionProcedure(final RegionPlan plan) {
  if (!plan.getRegionInfo().getTable().isSystemTable()) {
    return new MoveRegionProcedure(getProcedureEnvironment(), plan);
  }
  List<ServerName> excluded = getExcludedServersForSystemTable();
  ServerName dest = plan.getDestination();
  if (dest != null && excluded.contains(dest)) {
    try {
      LOG.info("Can not move " + plan.getRegionInfo() + " to " + plan.getDestination()
          + " because the server is not with highest version");
      plan.setDestination(getBalancer().randomAssignment(plan.getRegionInfo(),
          this.master.getServerManager().createDestinationServersList(excluded)));
    } catch (HBaseIOException e) {
      LOG.warn(e.toString(), e); // keep original destination on failure
    }
  }
  return new MoveRegionProcedure(getProcedureEnvironment(), plan);
}
示例11: testBalanceCluster
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Runs the stochastic load balancer (with generous step/time budgets and
 * unrestricted move percentage) against every mocked cluster layout.
 * <p>
 * Checks the balancing invariant — afterwards each server hosts either
 * floor(average) or ceiling(average) regions — and that a second balancing
 * pass on the already-balanced cluster produces no plans.
 */
@Test
public void testBalanceCluster() throws Exception {
  conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
  conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec
  conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
  loadBalancer.setConf(conf);
  for (int[] mockCluster : clusterStateMocks) {
    Map<ServerName, List<RegionInfo>> serverMap = mockClusterServers(mockCluster);
    List<ServerAndLoad> initialLoads = convertToList(serverMap);
    LOG.info("Mock Cluster : " + printMock(initialLoads) + " " + printStats(initialLoads));
    List<RegionPlan> movePlans = loadBalancer.balanceCluster(serverMap);
    List<ServerAndLoad> balancedLoads = reconcile(initialLoads, movePlans, serverMap);
    LOG.info("Mock Balance : " + printMock(balancedLoads));
    assertClusterAsBalanced(balancedLoads);
    // An already-balanced cluster must yield no further plans.
    List<RegionPlan> secondPlans = loadBalancer.balanceCluster(serverMap);
    assertNull(secondPlans);
    // Release the mock servers/regions back to their pools for the next layout.
    for (Map.Entry<ServerName, List<RegionInfo>> entry : serverMap.entrySet()) {
      returnRegions(entry.getValue());
      returnServer(entry.getKey());
    }
  }
}
示例12: reconcile
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Applies each RegionPlan to the per-server load counts and to the
 * per-server region lists, returning the resulting loads.
 *
 * This assumes the RegionPlan server instances are the same ones keyed in
 * the map, so actually no need to even pass in the map, but it's clearer.
 *
 * @param list baseline per-server loads before the moves
 * @param plans moves to apply; may be null (no moves)
 * @param servers per-server region lists, mutated in place to reflect the moves
 * @return the per-server loads after all plans have been applied
 */
protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list,
    List<RegionPlan> plans,
    Map<ServerName, List<RegionInfo>> servers) {
  Map<ServerName, ServerAndLoad> map = new HashMap<>(list.size());
  for (ServerAndLoad sl : list) {
    map.put(sl.getServerName(), sl);
  }
  if (plans != null) {
    for (RegionPlan plan : plans) {
      ServerName source = plan.getSource();
      updateLoad(map, source, -1);
      ServerName destination = plan.getDestination();
      updateLoad(map, destination, +1);
      // Mirror the move in the region lists themselves.
      servers.get(source).remove(plan.getRegionInfo());
      servers.get(destination).add(plan.getRegionInfo());
    }
  }
  // Originally the result list was pre-allocated and then clear()ed while
  // still empty (dead code); build it directly from the map values instead.
  return new ArrayList<>(map.values());
}
示例13: reconcile
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Returns a copy of the previous per-group load multimap with every
 * RegionPlan applied: -1 on each plan's source server, +1 on its
 * destination server. The input multimap is not modified.
 *
 * @param previousLoad per-group loads before the moves
 * @param plans moves to apply; may be null (no moves)
 * @return a new multimap reflecting the post-move loads
 */
private ArrayListMultimap<String, ServerAndLoad> reconcile(
    ArrayListMultimap<String, ServerAndLoad> previousLoad,
    List<RegionPlan> plans) {
  ArrayListMultimap<String, ServerAndLoad> updated = ArrayListMultimap
      .create();
  updated.putAll(previousLoad);
  if (plans == null) {
    return updated;
  }
  for (RegionPlan plan : plans) {
    updateLoad(updated, plan.getSource(), -1);
    updateLoad(updated, plan.getDestination(), +1);
  }
  return updated;
}
示例14: populatePool
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Unassign all regions, so that they go through the regular region
 * assignment flow (in assignment manager) and are re-opened.
 * One task is submitted per region server; each task unassigns every
 * region hosted on that server after re-open plans have been registered.
 */
@Override
protected void populatePool(ExecutorService pool) {
  LOG.debug("Creating threads for each region server ");
  for (Map.Entry<ServerName, List<HRegionInfo>> serverEntry : rsToRegions
      .entrySet()) {
    final List<HRegionInfo> regions = serverEntry.getValue();
    // Register a re-open plan (current server as destination) for every
    // region before kicking off the unassign.
    Map<String, RegionPlan> reopenPlans = new HashMap<String, RegionPlan>();
    for (HRegionInfo region : regions) {
      reopenPlans.put(region.getEncodedName(), new RegionPlan(region, null,
          assignmentManager.getRegionServerOfRegion(region)));
    }
    assignmentManager.addPlans(reopenPlans);
    pool.execute(new Runnable() {
      public void run() {
        assignmentManager.unassign(regions);
      }
    });
  }
}
示例15: updateTimers
import org.apache.hadoop.hbase.master.RegionPlan; //导入依赖的package包/类
/**
 * Touch timers for all regions in transition whose plan destination is the
 * passed <code>sn</code>.
 * Call this method whenever a server checks in. Doing so helps the case where
 * a new regionserver has joined the cluster and its been given 1k regions to
 * open. If this method is tickled every time the region reports in a
 * successful open then the 1k-th region won't be timed out just because its
 * sitting behind the open of 999 other regions. This method is NOT used
 * as part of bulk assign -- there we have a different mechanism for extending
 * the regions in transition timer (we turn it off temporarily -- because
 * there is no regionplan involved when bulk assigning.
 * @param sn server that just checked in
 */
private void updateTimers(final ServerName sn) {
  // Snapshot the plans under lock, then iterate the (possibly slightly
  // stale) copy, because iterating while holding the sync can deadlock.
  Map<String, RegionPlan> snapshot = new HashMap<String, RegionPlan>();
  synchronized(this.regionPlans) {
    snapshot.putAll(this.regionPlans);
  }
  for (Map.Entry<String, RegionPlan> entry : snapshot.entrySet()) {
    RegionPlan plan = entry.getValue();
    // Only plans that target the checked-in server are of interest.
    if (plan == null || plan.getDestination() == null
        || !plan.getDestination().equals(sn)) {
      continue;
    }
    RegionState state;
    synchronized (this.regionsInTransition) {
      state = this.regionsInTransition.get(entry.getKey());
    }
    if (state != null) {
      state.updateTimestampToNow();
    }
  }
}