本文整理汇总了Java中org.apache.hadoop.hbase.master.HMaster类的典型用法代码示例。如果您正苦于以下问题:Java HMaster类的具体用法?Java HMaster怎么用?Java HMaster使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
HMaster类属于org.apache.hadoop.hbase.master包,在下文中一共展示了HMaster类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testMoveToPreviouslyAssignedRS
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
@Test (timeout=300000)
public void testMoveToPreviouslyAssignedRS() throws IOException, InterruptedException {
// Moving a region onto the server that already hosts it must leave the
// assignment unchanged.
final byte[] table = Bytes.toBytes("testMoveToPreviouslyAssignedRS");
final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
final HMaster activeMaster = cluster.getMaster();
final HBaseAdmin admin = createTable(table);
// Grab the first region of the freshly created table.
final List<HRegionInfo> regions = admin.getTableRegions(table);
final HRegionInfo firstRegion = regions.get(0);
final AssignmentManager assignmentManager = activeMaster.getAssignmentManager();
assertTrue("Region " + firstRegion.getRegionNameAsString()
    + " should be assigned properly", assignmentManager.waitForAssignment(firstRegion));
final ServerName hostingServer =
    assignmentManager.getRegionStates().getRegionServerOfRegion(firstRegion);
// Request a move onto the very server that already holds the region.
admin.move(firstRegion.getEncodedNameAsBytes(), Bytes.toBytes(hostingServer.getServerName()));
assertEquals("Current region server and region server before move should be same.",
    hostingServer, assignmentManager.getRegionStates().getRegionServerOfRegion(firstRegion));
}
示例2: testAsyncFlushSnapshot
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
@Test(timeout = 300000)
public void testAsyncFlushSnapshot() throws Exception {
// Kick off a FLUSH-type snapshot asynchronously, then poll the master until done.
final Admin hbaseAdmin = UTIL.getHBaseAdmin();
final SnapshotDescription snapshotDesc =
    SnapshotDescription.newBuilder()
        .setName("asyncSnapshot")
        .setTable(TABLE_NAME.getNameAsString())
        .setType(SnapshotDescription.Type.FLUSH)
        .build();
hbaseAdmin.takeSnapshotAsync(snapshotDesc);
// Poll every 200ms until the master reports the snapshot finished.
final HMaster activeMaster = UTIL.getMiniHBaseCluster().getMaster();
SnapshotTestingUtils.waitForSnapshotToComplete(activeMaster, snapshotDesc, 200);
LOG.info(" === Async Snapshot Completed ===");
// Dump the filesystem layout to the log for post-mortem debugging.
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
    FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
// Verify exactly one matching snapshot exists.
SnapshotTestingUtils.assertOneSnapshotThatMatches(hbaseAdmin, snapshotDesc);
}
示例3: addMaster
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
/**
 * Creates a new master backed by the given per-master Configuration, wraps it
 * in a {@link JVMClusterUtil.MasterThread}, and registers it in
 * {@code masterThreads}.
 *
 * @param c per-master Configuration instance.
 * @param index identifier used to distinguish the returned thread.
 * @return the registered master thread (not yet started).
 * @throws IOException if master construction fails.
 */
public JVMClusterUtil.MasterThread addMaster(Configuration c, final int index)
throws IOException {
// Create each master with its own Configuration instance so each has
// its HConnection instance rather than share (see HBASE_INSTANCES down in
// the guts of HConnectionManager.
// Also, create separate CoordinatedStateManager instance per Server.
// This is special case when we have to have more than 1 CoordinatedStateManager
// within 1 process.
// NOTE(review): the state manager and the MASTER_IMPL class lookup read the
// shared field 'conf' rather than the per-master Configuration 'c' -- confirm
// this asymmetry is intentional.
CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(conf);
JVMClusterUtil.MasterThread mt = JVMClusterUtil.createMasterThread(c, cp,
(Class<? extends HMaster>) conf.getClass(HConstants.MASTER_IMPL, this.masterClass), index);
this.masterThreads.add(mt);
return mt;
}
示例4: createMasterThread
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
/**
 * Creates a {@link MasterThread}.
 * Call 'start' on the returned thread to make it run.
 * @param c Configuration to use.
 * @param cp coordinated state manager to hand to the new master.
 * @param hmc Class to create.
 * @param index Used distinguishing the object returned.
 * @throws IOException if the master cannot be instantiated reflectively.
 * @return Master added.
 */
public static JVMClusterUtil.MasterThread createMasterThread(
    final Configuration c, CoordinatedStateManager cp, final Class<? extends HMaster> hmc,
    final int index)
throws IOException {
  HMaster server;
  try {
    // Masters must expose a (Configuration, CoordinatedStateManager) constructor.
    server = hmc.getConstructor(Configuration.class, CoordinatedStateManager.class).
      newInstance(c, cp);
  } catch (InvocationTargetException ite) {
    // Unwrap the reflective wrapper so the real construction failure surfaces.
    // Report the target's own message (the original code skipped a level and
    // read target.getCause(), losing the immediate failure's message).
    Throwable target = ite.getTargetException();
    throw new RuntimeException("Failed construction of Master: " +
      hmc.toString() + ((target.getMessage() != null) ?
        target.getMessage() : ""), target);
  } catch (Exception e) {
    // Chain via the constructor so both cause and message are preserved
    // (the original used a bare IOException plus initCause, losing the message).
    throw new IOException(e);
  }
  return new JVMClusterUtil.MasterThread(server, index);
}
示例5: getServerHoldingRegion
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
@Override
public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName)
    throws IOException {
// Assume there is only one master thread which is the active master.
// If there are multiple master threads, the backup master threads
// should hold some regions. Please refer to #countServedRegions
// to see how we find out all regions.
final HMaster activeMaster = getMaster();
// The master itself may carry regions; check it before the region servers.
if (activeMaster.getOnlineRegion(regionName) != null) {
  return activeMaster.getServerName();
}
final int serverIndex = getServerWith(regionName);
// Negative index means no region server hosts the region.
return serverIndex < 0 ? null : getRegionServer(serverIndex).getServerName();
}
示例6: testFlushedSequenceIdsSentToHMaster
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
/**
 * Flushes every region on every region server, then checks that the master's
 * recorded last-flushed sequence ids match the locally collected ones.
 */
@Test
public void testFlushedSequenceIdsSentToHMaster()
throws IOException, InterruptedException, ServiceException {
// NOTE(review): this map is created empty and never written to anywhere in
// this method, so the containsKey() check below can never succeed and the
// assertion loop is effectively dead code -- confirm whether the code that
// populates allFlushedSequenceIds was lost from this snippet.
SortedMap<byte[], Long> allFlushedSequenceIds =
new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
for (int i = 0; i < NUM_RS; ++i) {
flushAllRegions(i);
}
// Give region servers time to report flushed sequence ids to the master.
Thread.sleep(10000);
HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
for (int i = 0; i < NUM_RS; ++i) {
for (byte[] regionName : getRegionsByServer(i)) {
if (allFlushedSequenceIds.containsKey(regionName)) {
GetLastFlushedSequenceIdRequest req =
RequestConverter.buildGetLastFlushedSequenceIdRequest(regionName);
// The master's recorded id must equal the locally observed one.
assertEquals((long)allFlushedSequenceIds.get(regionName),
master.getMasterRpcServices().getLastFlushedSequenceId(
null, req).getLastFlushedSequenceId());
}
}
}
}
示例7: moveRegionAndWait
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
private void moveRegionAndWait(HRegionInfo destRegion, ServerName destServer)
throws InterruptedException, MasterNotRunningException,
ZooKeeperConnectionException, IOException {
// Ask the master to move the region, then spin until the assignment
// manager reports it on the destination server.
final HMaster activeMaster = TEST_UTIL.getMiniHBaseCluster().getMaster();
TEST_UTIL.getHBaseAdmin().move(
    destRegion.getEncodedNameAsBytes(), Bytes.toBytes(destServer.getServerName()));
for (;;) {
  final ServerName current = activeMaster.getAssignmentManager()
      .getRegionStates().getRegionServerOfRegion(destRegion);
  if (destServer.equals(current)) {
    // Double-check the region is actually being served on that server.
    TEST_UTIL.assertRegionOnServer(destRegion, current, 200);
    return;
  }
  Thread.sleep(10);
}
}
示例8: testCreateTableCalledTwiceAndFirstOneInProgress
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
/**
 * Issues two create-table requests for the same table where the first one
 * fails mid-flight; the retry must complete and leave the table enabled.
 */
@Test (timeout=300000)
public void testCreateTableCalledTwiceAndFirstOneInProgress() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateTableCalledTwiceAndFirstOneInProgress");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  final HRegionInfo[] hRegionInfos = new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null,
      null) };
  // First handler is rigged (via the throwException flag) to fail mid-flight.
  CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
      desc, cluster.getConfiguration(), hRegionInfos, m);
  handler.prepare();
  throwException = true;
  handler.process();
  throwException = false;
  // A second request for the same table must succeed despite the first failure.
  CustomCreateTableHandler handler1 = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
      desc, cluster.getConfiguration(), hRegionInfos, m);
  handler1.prepare();
  handler1.process();
  // Wait up to ~20s for the table to come online, stopping as soon as it is
  // available (the original loop kept polling all 100 iterations even after
  // the table was up).
  for (int i = 0; i < 100; i++) {
    if (TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName)) {
      break;
    }
    Thread.sleep(200);
  }
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(tableName));
}
示例9: testMasterRestartAfterEnablingNodeIsCreated
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
/**
 * Aborts the active master after a create-table handler failed mid-flight and
 * verifies a replacement master starts and is the sole live master.
 */
@Test (timeout=60000)
public void testMasterRestartAfterEnablingNodeIsCreated() throws Exception {
  byte[] tableName = Bytes.toBytes("testMasterRestartAfterEnablingNodeIsCreated");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  final HRegionInfo[] hRegionInfos = new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null,
      null) };
  // Rig the handler (via the throwException flag) to fail during creation.
  CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
      desc, cluster.getConfiguration(), hRegionInfos, m);
  handler.prepare();
  throwException = true;
  handler.process();
  // Replace the failed master and check exactly one is alive.
  abortAndStartNewMaster(cluster);
  // assertEquals reports expected/actual on failure, unlike assertTrue(x == 1).
  assertEquals(1, cluster.getLiveMasterThreads().size());
}
示例10: testStarted
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
@Test (timeout=180000)
public void testStarted() throws Exception {
// The mini cluster's master must be active and carry our observer coprocessor.
final MiniHBaseCluster cluster = UTIL.getHBaseCluster();
final HMaster activeMaster = cluster.getMaster();
assertTrue("Master should be active", activeMaster.isActiveMaster());
final MasterCoprocessorHost coprocessorHost = activeMaster.getMasterCoprocessorHost();
assertNotNull("CoprocessorHost should not be null", coprocessorHost);
final CPMasterObserver observer = (CPMasterObserver) coprocessorHost.findCoprocessor(
    CPMasterObserver.class.getName());
assertNotNull("CPMasterObserver coprocessor not found or not installed!", observer);
// All basic lifecycle hooks must have fired during master startup.
assertTrue("MasterObserver should have been started", observer.wasStarted());
assertTrue("preMasterInitialization() hook should have been called",
    observer.wasMasterInitializationCalled());
assertTrue("postStartMaster() hook should have been called",
    observer.wasStartMasterCalled());
}
示例11: testTableDescriptorsEnumeration
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
@Test (timeout=180000)
public void testTableDescriptorsEnumeration() throws Exception {
// Issue a "list all table descriptors" RPC and verify the observer saw it.
final MiniHBaseCluster cluster = UTIL.getHBaseCluster();
final HMaster activeMaster = cluster.getMaster();
final MasterCoprocessorHost coprocessorHost = activeMaster.getMasterCoprocessorHost();
final CPMasterObserver observer = (CPMasterObserver) coprocessorHost.findCoprocessor(
    CPMasterObserver.class.getName());
observer.resetStates();
// A null table-name list requests descriptors for every table.
final GetTableDescriptorsRequest request =
    RequestConverter.buildGetTableDescriptorsRequest((List<TableName>) null);
activeMaster.getMasterRpcServices().getTableDescriptors(null, request);
assertTrue("Coprocessor should be called on table descriptors request",
    observer.wasGetTableDescriptorsCalled());
}
示例12: waitForSnapshotToComplete
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
/**
* Helper method for testing async snapshot operations. Just waits for the
* given snapshot to complete on the server by repeatedly checking the master.
*
* @param master: the master running the snapshot
* @param snapshot: the snapshot to check
* @param sleep: amount to sleep between checks to see if the snapshot is done
* @throws ServiceException if the snapshot fails
*/
public static void waitForSnapshotToComplete(HMaster master,
SnapshotDescription snapshot, long sleep) throws ServiceException {
final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder()
.setSnapshot(snapshot).build();
IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder()
.buildPartial();
while (!done.getDone()) {
done = master.getMasterRpcServices().isSnapshotDone(null, request);
try {
Thread.sleep(sleep);
} catch (InterruptedException e) {
throw new ServiceException(e);
}
}
}
示例13: expectSnapshotDoneException
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
/**
* Expect the snapshot to throw an error when checking if the snapshot is
* complete
*
* @param master master to check
* @param snapshot the {@link SnapshotDescription} request to pass to the master
* @param clazz expected exception from the master
*/
public static void expectSnapshotDoneException(HMaster master,
IsSnapshotDoneRequest snapshot,
Class<? extends HBaseSnapshotException> clazz) {
try {
master.getMasterRpcServices().isSnapshotDone(null, snapshot);
Assert.fail("didn't fail to lookup a snapshot");
} catch (ServiceException se) {
try {
throw ProtobufUtil.getRemoteException(se);
} catch (HBaseSnapshotException e) {
assertEquals("Threw wrong snapshot exception!", clazz, e.getClass());
} catch (Throwable t) {
Assert.fail("Threw an unexpected exception:" + t);
}
}
}
示例14: testAsyncFlushSnapshot
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
@Test(timeout = 300000)
public void testAsyncFlushSnapshot() throws Exception {
// Request a FLUSH-type snapshot asynchronously, then poll until it finishes.
final Admin hbaseAdmin = UTIL.getHBaseAdmin();
final SnapshotDescription snapshotDesc =
    SnapshotDescription.newBuilder()
        .setName("asyncSnapshot")
        .setTable(TABLE_NAME.getNameAsString())
        .setType(SnapshotDescription.Type.FLUSH)
        .build();
hbaseAdmin.takeSnapshotAsync(snapshotDesc);
// Poll the master every 200ms until it reports the snapshot complete.
final HMaster activeMaster = UTIL.getMiniHBaseCluster().getMaster();
SnapshotTestingUtils.waitForSnapshotToComplete(activeMaster, snapshotDesc, 200);
LOG.info(" === Async Snapshot Completed ===");
// Dump the master's filesystem layout to the log for post-mortem debugging.
UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
// Verify exactly one matching snapshot was created.
SnapshotTestingUtils.assertOneSnapshotThatMatches(hbaseAdmin, snapshotDesc);
}
示例15: testRewritingClusterIdToPB
import org.apache.hadoop.hbase.master.HMaster; //导入依赖的package包/类
@Test
public void testRewritingClusterIdToPB() throws Exception {
TEST_UTIL.startMiniZKCluster();
TEST_UTIL.startMiniDFSCluster(1);
TEST_UTIL.createRootDir();
TEST_UTIL.getConfiguration().setBoolean("hbase.replication", true);
Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
Path filePath = new Path(rootDir, HConstants.CLUSTER_ID_FILE_NAME);
FSDataOutputStream s = null;
try {
s = fs.create(filePath);
s.writeUTF(UUID.randomUUID().toString());
} finally {
if (s != null) {
s.close();
}
}
TEST_UTIL.startMiniHBaseCluster(1, 1);
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
assertEquals(1, master.getServerManager().getOnlineServersList().size());
}