This article collects typical usage examples of the Java class org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory. If you are unsure what LoadBalancerFactory is for or how to use it, the curated examples below should help.
The LoadBalancerFactory class belongs to the org.apache.hadoop.hbase.master.balancer package. 15 code examples of the class are shown below, sorted by popularity by default.
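Before the examples, here is a minimal self-contained sketch of the basic pattern they all share. The sketch is ours, not taken from the examples below: it assumes the factory reads the hbase.master.loadbalancer.class key (as in recent HBase releases) and falls back to a version-specific default implementation when the key is unset; the SimpleLoadBalancer class name is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;

public class LoadBalancerFactoryDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: this key selects the implementation; when unset, the
    // factory falls back to the version-specific default balancer.
    conf.set("hbase.master.loadbalancer.class",
        "org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer");
    LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(conf);
    System.out.println("Balancer in use: " + balancer.getClass().getName());
  }
}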
Example 1: testForceAssignMergingRegion
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
@Test (timeout=180000)
public void testForceAssignMergingRegion() throws Exception {
  // Region to use in test.
  final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
      server.getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
      this.serverManager, balancer, null, null, master.getTableLockManager());
  RegionStates regionStates = am.getRegionStates();
  try {
    // First set the state of the region to merging.
    regionStates.updateRegionState(hri, RegionState.State.MERGING);
    // Now, try to assign it with force-new-plan.
    am.assign(hri, true, true);
    assertEquals("The region should still be in merging state",
        RegionState.State.MERGING, regionStates.getRegionState(hri).getState());
  } finally {
    am.shutdown();
  }
}
Example 2: testForceAssignMergingRegion
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
@Test
public void testForceAssignMergingRegion() throws Exception {
  // Region to use in test.
  final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  // Need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
      server.getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
      this.serverManager, ct, balancer, null, null, master.getTableLockManager());
  RegionStates regionStates = am.getRegionStates();
  try {
    // First set the state of the region to merging.
    regionStates.updateRegionState(hri, RegionState.State.MERGING);
    // Now, try to assign it with force-new-plan.
    am.assign(hri, true, true);
    assertEquals("The region should still be in merging state",
        RegionState.State.MERGING, regionStates.getRegionState(hri).getState());
  } finally {
    am.shutdown();
  }
}
Example 3: initializeZKBasedSystemTrackers
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
/**
 * Initialize all ZK based system trackers.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws CoordinatedStateException
 */
void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException, CoordinatedStateException {
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
      this.balancer, this.service, this.metricsMaster,
      this.tableLockManager);
  zooKeeper.registerListenerFirst(assignmentManager);
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
      this.serverManager);
  this.drainingServerTracker.start();
  // Set the cluster as up. If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();
  LOG.info("Server active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");
  // create/initialize the snapshot manager and other procedure managers
  this.snapshotManager = new SnapshotManager();
  this.mpmHost = new MasterProcedureManagerHost();
  this.mpmHost.register(this.snapshotManager);
  this.mpmHost.register(new MasterFlushTableProcedureManager());
  this.mpmHost.loadProcedures(conf);
  this.mpmHost.initialize(this, this.metricsMaster);
}
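Because getLoadBalancer(conf) is reflection-based, the master wiring shown in Example 3 will pick up any custom implementation named in the configuration. Below is a hypothetical sketch under that assumption: NoOpLoadBalancer is an illustrative class of ours, not part of HBase, and it assumes the SimpleLoadBalancer base class from this package plus the convention that a null plan list means "nothing to move".

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer;

// Hypothetical balancer that never moves regions.
public class NoOpLoadBalancer extends SimpleLoadBalancer {
  @Override
  public List<RegionPlan> balanceCluster(
      Map<ServerName, List<HRegionInfo>> clusterState) {
    return null; // a null plan list is treated as "no rebalancing needed"
  }
}

It would be selected with conf.set("hbase.master.loadbalancer.class", NoOpLoadBalancer.class.getName()) before the master starts.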
Example 4: testUnassignWithSplitAtSameTime
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
@Test (timeout=180000)
public void testUnassignWithSplitAtSameTime() throws KeeperException,
    IOException, CoordinatedStateException {
  // Region to use in test.
  final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  // First amend the servermanager mock so that when we send close of the
  // first meta region on SERVERNAME_A, it returns true rather than the
  // default null.
  Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, hri, -1)).thenReturn(true);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server
      .getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
      this.serverManager, balancer, null, null, master.getTableLockManager());
  try {
    // First make sure the mock basically works. Unassign a region.
    unassign(am, SERVERNAME_A, hri);
    // This delete will fail if the previous unassign did the wrong thing.
    ZKAssign.deleteClosingNode(this.watcher, hri, SERVERNAME_A);
    // Now put a SPLITTING region in the way. We don't have to assert it
    // got put in place; this method puts it in place, then asserts it still
    // owns it by moving state from SPLITTING to SPLITTING.
    int version = createNodeSplitting(this.watcher, hri, SERVERNAME_A);
    // Now retry the unassign with the SPLITTING node in place. It should
    // complete without failing; a sort of 'silent' recognition that the
    // region to unassign has been split and no longer exists. TODO: what if
    // the split fails and the parent region comes back to life?
    unassign(am, SERVERNAME_A, hri);
    // This transition should fail if the znode has been messed with.
    ZKAssign.transitionNode(this.watcher, hri, SERVERNAME_A,
        EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
    assertFalse(am.getRegionStates().isRegionInTransition(hri));
  } finally {
    am.shutdown();
  }
}
Example 5: testOpenCloseRegionRPCIntendedForPreviousServer
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
/**
 * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port
 * for openRegion. The AM should assign this region somewhere else. (HBASE-9721)
 */
@SuppressWarnings("unchecked")
@Test (timeout=180000)
public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception {
  Mockito.when(this.serverManager.sendRegionOpen(Mockito.eq(SERVERNAME_B), Mockito.eq(REGIONINFO),
      Mockito.anyInt(), (List<ServerName>)Mockito.any()))
      .thenThrow(new DoNotRetryIOException());
  this.server.getConfiguration().setInt("hbase.assignment.maximum.attempts", 100);
  HRegionInfo hri = REGIONINFO;
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
      server.getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
      this.serverManager, balancer, null, null, master.getTableLockManager());
  RegionStates regionStates = am.getRegionStates();
  try {
    am.regionPlans.put(REGIONINFO.getEncodedName(),
        new RegionPlan(REGIONINFO, null, SERVERNAME_B));
    // Should fail once, but succeed on the second attempt for SERVERNAME_A.
    am.assign(hri, true, false);
  } finally {
    assertEquals(SERVERNAME_A, regionStates.getRegionState(REGIONINFO).getServerName());
    am.shutdown();
  }
}
Example 6: initializeZKBasedSystemTrackers
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
/**
 * Initialize all ZK based system trackers.
 * @throws IOException
 * @throws InterruptedException
 */
void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException {
  this.catalogTracker = createCatalogTracker(this.zooKeeper, this.conf, this);
  this.catalogTracker.start();
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
      this.catalogTracker, this.balancer, this.executorService, this.metricsMaster,
      this.tableLockManager);
  zooKeeper.registerListenerFirst(assignmentManager);
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
      this.serverManager);
  this.drainingServerTracker.start();
  // Set the cluster as up. If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();
  LOG.info("Server active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");
  // create/initialize the snapshot manager and other procedure managers
  this.snapshotManager = new SnapshotManager();
  this.mpmHost = new MasterProcedureManagerHost();
  this.mpmHost.register(this.snapshotManager);
  this.mpmHost.loadProcedures(conf);
  this.mpmHost.initialize(this, this.metricsMaster);
}
Example 7: testUnassignWithSplitAtSameTime
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
@Test
public void testUnassignWithSplitAtSameTime() throws KeeperException, IOException {
  // Region to use in test.
  final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  // First amend the servermanager mock so that when we send close of the
  // first meta region on SERVERNAME_A, it returns true rather than the
  // default null.
  Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, hri, -1)).thenReturn(true);
  // Need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server
      .getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
      this.serverManager, ct, balancer, null, null, master.getTableLockManager());
  try {
    // First make sure the mock basically works. Unassign a region.
    unassign(am, SERVERNAME_A, hri);
    // This delete will fail if the previous unassign did the wrong thing.
    ZKAssign.deleteClosingNode(this.watcher, hri, SERVERNAME_A);
    // Now put a SPLITTING region in the way. We don't have to assert it
    // got put in place; this method puts it in place, then asserts it still
    // owns it by moving state from SPLITTING to SPLITTING.
    int version = createNodeSplitting(this.watcher, hri, SERVERNAME_A);
    // Now retry the unassign with the SPLITTING node in place. It should
    // complete without failing; a sort of 'silent' recognition that the
    // region to unassign has been split and no longer exists. TODO: what if
    // the split fails and the parent region comes back to life?
    unassign(am, SERVERNAME_A, hri);
    // This transition should fail if the znode has been messed with.
    ZKAssign.transitionNode(this.watcher, hri, SERVERNAME_A,
        EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
    assertFalse(am.getRegionStates().isRegionInTransition(hri));
  } finally {
    am.shutdown();
  }
}
Example 8: testOpenCloseRegionRPCIntendedForPreviousServer
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
/**
 * Tests an on-the-fly RPC that was scheduled for the earlier RS on the same port
 * for openRegion. The AM should assign this region somewhere else. (HBASE-9721)
 */
@SuppressWarnings("unchecked")
@Test
public void testOpenCloseRegionRPCIntendedForPreviousServer() throws Exception {
  Mockito.when(this.serverManager.sendRegionOpen(Mockito.eq(SERVERNAME_B), Mockito.eq(REGIONINFO),
      Mockito.anyInt(), (List<ServerName>)Mockito.any()))
      .thenThrow(new DoNotRetryIOException());
  this.server.getConfiguration().setInt("hbase.assignment.maximum.attempts", 100);
  HRegionInfo hri = REGIONINFO;
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(
      server.getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
      this.serverManager, ct, balancer, null, null, master.getTableLockManager());
  RegionStates regionStates = am.getRegionStates();
  try {
    am.regionPlans.put(REGIONINFO.getEncodedName(),
        new RegionPlan(REGIONINFO, null, SERVERNAME_B));
    // Should fail once, but succeed on the second attempt for SERVERNAME_A.
    am.assign(hri, true, false);
  } finally {
    assertEquals(SERVERNAME_A, regionStates.getRegionState(REGIONINFO).getServerName());
    am.shutdown();
  }
}
Example 9: initializeZKBasedSystemTrackers
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
/**
 * Initialize all ZK based system trackers.
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws CoordinatedStateException
 */
void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException, CoordinatedStateException {
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
      this.catalogTracker, this.balancer, this.service, this.metricsMaster,
      this.tableLockManager);
  zooKeeper.registerListenerFirst(assignmentManager);
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
      this.serverManager);
  this.drainingServerTracker.start();
  // Set the cluster as up. If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();
  LOG.info("Server active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");
  // create/initialize the snapshot manager and other procedure managers
  this.snapshotManager = new SnapshotManager();
  this.mpmHost = new MasterProcedureManagerHost();
  this.mpmHost.register(this.snapshotManager);
  this.mpmHost.register(new MasterFlushTableProcedureManager());
  this.mpmHost.loadProcedures(conf);
  this.mpmHost.initialize(this, this.metricsMaster);
}
Example 10: testUnassignWithSplitAtSameTime
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
@Test
public void testUnassignWithSplitAtSameTime() throws KeeperException,
    IOException, CoordinatedStateException {
  // Region to use in test.
  final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  // First amend the servermanager mock so that when we send close of the
  // first meta region on SERVERNAME_A, it returns true rather than the
  // default null.
  Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, hri, -1)).thenReturn(true);
  // Need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server
      .getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
      this.serverManager, ct, balancer, null, null, master.getTableLockManager());
  try {
    // First make sure the mock basically works. Unassign a region.
    unassign(am, SERVERNAME_A, hri);
    // This delete will fail if the previous unassign did the wrong thing.
    ZKAssign.deleteClosingNode(this.watcher, hri, SERVERNAME_A);
    // Now put a SPLITTING region in the way. We don't have to assert it
    // got put in place; this method puts it in place, then asserts it still
    // owns it by moving state from SPLITTING to SPLITTING.
    int version = createNodeSplitting(this.watcher, hri, SERVERNAME_A);
    // Now retry the unassign with the SPLITTING node in place. It should
    // complete without failing; a sort of 'silent' recognition that the
    // region to unassign has been split and no longer exists. TODO: what if
    // the split fails and the parent region comes back to life?
    unassign(am, SERVERNAME_A, hri);
    // This transition should fail if the znode has been messed with.
    ZKAssign.transitionNode(this.watcher, hri, SERVERNAME_A,
        EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
    assertFalse(am.getRegionStates().isRegionInTransition(hri));
  } finally {
    am.shutdown();
  }
}
Example 11: initializeZKBasedSystemTrackers
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
/**
 * Initialize all ZK based system trackers.
 * @throws IOException
 * @throws InterruptedException
 */
void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException {
  this.catalogTracker = createCatalogTracker(this.zooKeeper, this.conf, this);
  this.catalogTracker.start();
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
      this.catalogTracker, this.balancer, this.executorService, this.metricsMaster,
      this.tableLockManager);
  zooKeeper.registerListenerFirst(assignmentManager);
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
      this.serverManager);
  this.drainingServerTracker.start();
  // Set the cluster as up. If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();
  LOG.info("Server active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");
  // create the snapshot manager
  this.snapshotManager = new SnapshotManager(this, this.metricsMaster);
}
Example 12: initializeZKBasedSystemTrackers
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
/**
 * Initialize all ZK based system trackers.
 * @throws IOException
 * @throws InterruptedException
 */
private void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException {
  this.catalogTracker = createCatalogTracker(this.zooKeeper, this.conf,
      this, conf.getInt("hbase.master.catalog.timeout", 600000));
  this.catalogTracker.start();
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
      this.catalogTracker, this.balancer, this.executorService, this.metricsMaster);
  zooKeeper.registerListenerFirst(assignmentManager);
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
      this.serverManager);
  this.drainingServerTracker.start();
  // Set the cluster as up. If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();
  LOG.info("Server active/primary master; " + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", cluster-up flag was=" + wasUp);
}
Example 13: testUnassignWithSplitAtSameTime
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
@Test
public void testUnassignWithSplitAtSameTime() throws KeeperException, IOException {
  // Region to use in test.
  final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
  // First amend the servermanager mock so that when we send close of the
  // first meta region on SERVERNAME_A, it returns true rather than the
  // default null.
  Mockito.when(this.serverManager.sendRegionClose(SERVERNAME_A, hri, -1)).thenReturn(true);
  // Need a mocked catalog tracker.
  CatalogTracker ct = Mockito.mock(CatalogTracker.class);
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(server
      .getConfiguration());
  // Create an AM.
  AssignmentManager am = new AssignmentManager(this.server,
      this.serverManager, ct, balancer, null, null);
  try {
    // First make sure the mock basically works. Unassign a region.
    unassign(am, SERVERNAME_A, hri);
    // This delete will fail if the previous unassign did the wrong thing.
    ZKAssign.deleteClosingNode(this.watcher, hri);
    // Now put a SPLITTING region in the way. We don't have to assert it
    // got put in place; this method puts it in place, then asserts it still
    // owns it by moving state from SPLITTING to SPLITTING.
    int version = createNodeSplitting(this.watcher, hri, SERVERNAME_A);
    // Now retry the unassign with the SPLITTING node in place. It should
    // complete without failing; a sort of 'silent' recognition that the
    // region to unassign has been split and no longer exists. TODO: what if
    // the split fails and the parent region comes back to life?
    unassign(am, SERVERNAME_A, hri);
    // This transition should fail if the znode has been messed with.
    ZKAssign.transitionNode(this.watcher, hri, SERVERNAME_A,
        EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
    assertFalse(am.getRegionStates().isRegionInTransition(hri));
  } finally {
    am.shutdown();
  }
}
Example 14: initializeZKBasedSystemTrackers
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
/**
 * Initialize all ZK based system trackers.
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws CoordinatedStateException
 */
void initializeZKBasedSystemTrackers() throws IOException,
    InterruptedException, KeeperException, CoordinatedStateException {
  this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
  this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);
  this.normalizer.setMasterServices(this);
  this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this);
  this.loadBalancerTracker.start();
  this.regionNormalizerTracker = new RegionNormalizerTracker(zooKeeper, this);
  this.regionNormalizerTracker.start();
  this.assignmentManager = new AssignmentManager(this, serverManager,
      this.balancer, this.service, this.metricsMaster,
      this.tableLockManager);
  zooKeeper.registerListenerFirst(assignmentManager);
  this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
      this.serverManager);
  this.regionServerTracker.start();
  this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this,
      this.serverManager);
  this.drainingServerTracker.start();
  // Set the cluster as up. If new RSs, they'll be waiting on this before
  // going ahead with their startup.
  boolean wasUp = this.clusterStatusTracker.isClusterUp();
  if (!wasUp) this.clusterStatusTracker.setClusterUp();
  LOG.info("Server active/primary master=" + this.serverName +
      ", sessionid=0x" +
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
      ", setting cluster-up flag (Was=" + wasUp + ")");
  // create/initialize the snapshot manager and other procedure managers
  this.snapshotManager = new SnapshotManager();
  this.mpmHost = new MasterProcedureManagerHost();
  this.mpmHost.register(this.snapshotManager);
  this.mpmHost.register(new MasterFlushTableProcedureManager());
  this.mpmHost.loadProcedures(conf);
  this.mpmHost.initialize(this, this.metricsMaster);
}
Example 15: testFavoredNodesPresentForRoundRobinAssignment
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; // import the required package/class
@Test
public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException {
  LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration());
  balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster());
  List<ServerName> servers = new ArrayList<ServerName>();
  for (int i = 0; i < SLAVES; i++) {
    ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName();
    servers.add(server);
  }
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
  HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar"));
  regions.add(region);
  Map<ServerName, List<HRegionInfo>> assignmentMap = balancer.roundRobinAssignment(regions,
      servers);
  Set<ServerName> serverBefore = assignmentMap.keySet();
  List<ServerName> favoredNodesBefore =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesBefore.size() == 3);
  // the primary RS should be the one that the balancer's assignment returns
  assertTrue(ServerName.isSameHostnameAndPort(serverBefore.iterator().next(),
      favoredNodesBefore.get(PRIMARY)));
  // now remove the primary from the list of available servers
  List<ServerName> removedServers = removeMatchingServers(serverBefore, servers);
  // call roundRobinAssignment with the modified servers list
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  List<ServerName> favoredNodesAfter =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesAfter.size() == 3);
  // We don't expect the favored nodes assignments to change in multiple calls
  // to the roundRobinAssignment method in the balancer (relevant for
  // AssignmentManager.assign failures)
  assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore));
  Set<ServerName> serverAfter = assignmentMap.keySet();
  // We expect the new RegionServer assignee to be one of the favored nodes
  // chosen earlier.
  assertTrue(ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
      favoredNodesBefore.get(SECONDARY)) ||
      ServerName.isSameHostnameAndPort(serverAfter.iterator().next(),
      favoredNodesBefore.get(TERTIARY)));
  // put back the primary in the list of available servers
  servers.addAll(removedServers);
  // now roundRobinAssignment with the modified servers list should return the
  // primary as the regionserver assignee
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  Set<ServerName> serverWithPrimary = assignmentMap.keySet();
  assertTrue(serverBefore.containsAll(serverWithPrimary));
  // Make all the favored nodes unavailable for assignment
  removeMatchingServers(favoredNodesAfter, servers);
  // call roundRobinAssignment with the modified servers list
  assignmentMap = balancer.roundRobinAssignment(regions, servers);
  List<ServerName> favoredNodesNow =
      ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region);
  assertTrue(favoredNodesNow.size() == 3);
  assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) &&
      !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) &&
      !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY)));
}