This page collects typical usage examples of the Java class org.apache.hadoop.hbase.MiniHBaseCluster. If you are unsure what MiniHBaseCluster is for, or how and where to use it, the curated examples below should help.
The MiniHBaseCluster class belongs to the org.apache.hadoop.hbase package. Fifteen code examples are shown below, sorted by popularity.
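Before diving into the examples, here is a minimal, self-contained sketch of the most common pattern: a MiniHBaseCluster is normally obtained from an HBaseTestingUtility rather than constructed directly. This is a sketch assuming an HBase 1.x-era test classpath (matching the examples below); the table and family names are illustrative only.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniHBaseClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    // Spins up an in-process ZooKeeper, DFS, master, and region server.
    MiniHBaseCluster cluster = testUtil.startMiniCluster();
    try {
      // The cluster object exposes the master and region servers for assertions.
      System.out.println("Active master: " + cluster.getMaster().getServerName());
      testUtil.createTable(TableName.valueOf("demo_table"), Bytes.toBytes("cf"));
    } finally {
      // Always tear the mini cluster down, or ports and temp dirs leak between tests.
      testUtil.shutdownMiniCluster();
    }
  }
}

Most of the examples that follow use exactly this utility (named TEST_UTIL, UTIL, or htu) and differ only in what they exercise against the running cluster.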
Example 1: testMoveToPreviouslyAssignedRS
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test (timeout=300000)
public void testMoveToPreviouslyAssignedRS() throws IOException, InterruptedException {
  byte[] tableName = Bytes.toBytes("testMoveToPreviouslyAssignedRS");
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  HBaseAdmin localAdmin = createTable(tableName);
  List<HRegionInfo> tableRegions = localAdmin.getTableRegions(tableName);
  HRegionInfo hri = tableRegions.get(0);
  AssignmentManager am = master.getAssignmentManager();
  assertTrue("Region " + hri.getRegionNameAsString()
      + " should be assigned properly", am.waitForAssignment(hri));
  ServerName server = am.getRegionStates().getRegionServerOfRegion(hri);
  localAdmin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(server.getServerName()));
  assertEquals("Current region server and region server before move should be same.", server,
      am.getRegionStates().getRegionServerOfRegion(hri));
}
Example 2: testCreateTableCalledTwiceAndFirstOneInProgress
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test (timeout=300000)
public void testCreateTableCalledTwiceAndFirstOneInProgress() throws Exception {
  final TableName tableName =
      TableName.valueOf("testCreateTableCalledTwiceAndFirstOneInProgress");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  final HRegionInfo[] hRegionInfos =
      new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null, null) };
  CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
      desc, cluster.getConfiguration(), hRegionInfos, m);
  handler.prepare();
  throwException = true;
  handler.process();
  throwException = false;
  CustomCreateTableHandler handler1 = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
      desc, cluster.getConfiguration(), hRegionInfos, m);
  handler1.prepare();
  handler1.process();
  // Poll for up to ~20 seconds while the second create finishes making the table available.
  for (int i = 0; i < 100; i++) {
    if (!TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName)) {
      Thread.sleep(200);
    }
  }
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(tableName));
}
Example 3: testMasterRestartAfterEnablingNodeIsCreated
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test (timeout=60000)
public void testMasterRestartAfterEnablingNodeIsCreated() throws Exception {
  byte[] tableName = Bytes.toBytes("testMasterRestartAfterEnablingNodeIsCreated");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  final HRegionInfo[] hRegionInfos =
      new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null, null) };
  CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
      desc, cluster.getConfiguration(), hRegionInfos, m);
  handler.prepare();
  throwException = true;
  handler.process();
  abortAndStartNewMaster(cluster);
  assertTrue(cluster.getLiveMasterThreads().size() == 1);
}
Example 4: testDisableTableAndRestart
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test(timeout = 300000)
public void testDisableTableAndRestart() throws Exception {
  final TableName tableName = TableName.valueOf("testDisableTableAndRestart");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  admin.createTable(desc);
  admin.disableTable(tableName);
  TEST_UTIL.waitTableDisabled(tableName.getName());
  TEST_UTIL.getHBaseCluster().shutdown();
  TEST_UTIL.getHBaseCluster().waitUntilShutDown();
  TEST_UTIL.restartHBaseCluster(2);
  admin.enableTable(tableName);
  TEST_UTIL.waitTableEnabled(tableName);
}
Example 5: testStarted
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test (timeout=180000)
public void testStarted() throws Exception {
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  assertTrue("Master should be active", master.isActiveMaster());
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  assertNotNull("CoprocessorHost should not be null", host);
  CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
      CPMasterObserver.class.getName());
  assertNotNull("CPMasterObserver coprocessor not found or not installed!", cp);
  // check basic lifecycle
  assertTrue("MasterObserver should have been started", cp.wasStarted());
  assertTrue("preMasterInitialization() hook should have been called",
      cp.wasMasterInitializationCalled());
  assertTrue("postStartMaster() hook should have been called",
      cp.wasStartMasterCalled());
}
Example 6: testTableDescriptorsEnumeration
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test (timeout=180000)
public void testTableDescriptorsEnumeration() throws Exception {
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
      CPMasterObserver.class.getName());
  cp.resetStates();
  GetTableDescriptorsRequest req =
      RequestConverter.buildGetTableDescriptorsRequest((List<TableName>)null);
  master.getMasterRpcServices().getTableDescriptors(null, req);
  assertTrue("Coprocessor should be called on table descriptors request",
      cp.wasGetTableDescriptorsCalled());
}
Example 7: testExceptionDuringInitialization
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test(timeout=60000)
public void testExceptionDuringInitialization() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
  conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
  TEST_UTIL.startMiniCluster(2);
  try {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    // Trigger one regionserver to fail as if it came up with a coprocessor
    // that fails during initialization
    final HRegionServer regionServer = cluster.getRegionServer(0);
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        FailedInitializationObserver.class.getName());
    regionServer.getRegionServerCoprocessorHost().loadSystemCoprocessors(conf,
        CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
    TEST_UTIL.waitFor(10000, 1000, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return regionServer.isAborted();
      }
    });
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
Example 8: beforeClass
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@BeforeClass
public static void beforeClass() throws Exception {
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  // Try to bind the hostname to localhost to solve an issue when it is not configured
  // or no DNS resolution is available.
  conf.setStrings("hbase.master.hostname", "localhost");
  conf.setStrings("hbase.regionserver.hostname", "localhost");
  htu = new HBaseTestingUtility(conf);
  // We don't use the full htu.startMiniCluster() to avoid starting unneeded HDFS/MR daemons.
  htu.startMiniZKCluster();
  MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 4);
  hbm.waitForActiveAndReadyMaster();
  admin = htu.getHBaseAdmin();
}
Example 9: testUnmanagedHConnectionReconnect
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
/**
 * Test that unmanaged HConnections are able to reconnect
 * properly (see HBASE-5058).
 * @throws Exception
 */
@Test
public void testUnmanagedHConnectionReconnect() throws Exception {
  final byte[] tableName = Bytes.toBytes("testUnmanagedHConnectionReconnect");
  HTable t = createUnmangedHConnectionHTable(tableName);
  HConnection conn = t.getConnection();
  HBaseAdmin ha = new HBaseAdmin(conn);
  assertTrue(ha.tableExists(tableName));
  assertTrue(t.get(new Get(ROW)).isEmpty());
  // stop the master
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  cluster.stopMaster(0, false);
  cluster.waitOnMaster(0);
  // start up a new master
  cluster.startMaster();
  assertTrue(cluster.waitForActiveAndReadyMaster());
  // test that the same unmanaged connection works with a new
  // HBaseAdmin and can connect to the new master
  HBaseAdmin newAdmin = new HBaseAdmin(conn);
  assertTrue(newAdmin.tableExists(tableName));
  // use assertEquals rather than a bare `assert`, which is a no-op unless -ea is set
  assertEquals(SLAVES, newAdmin.getClusterStatus().getServersSize());
}
Example 10: testCreateTableCalledTwiceAndFirstOneInProgress
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test
public void testCreateTableCalledTwiceAndFirstOneInProgress() throws Exception {
  final byte[] tableName = Bytes.toBytes("testCreateTableCalledTwiceAndFirstOneInProgress");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  final HRegionInfo[] hRegionInfos =
      new HRegionInfo[] { new HRegionInfo(desc.getName(), null, null) };
  CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
      m.getServerManager(), desc, cluster.getConfiguration(), hRegionInfos,
      m.getCatalogTracker(), m.getAssignmentManager());
  throwException = true;
  handler.process();
  throwException = false;
  CustomCreateTableHandler handler1 = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
      m.getServerManager(), desc, cluster.getConfiguration(), hRegionInfos,
      m.getCatalogTracker(), m.getAssignmentManager());
  handler1.process();
  for (int i = 0; i < 100; i++) {
    if (!TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName)) {
      Thread.sleep(200);
    }
  }
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(tableName));
}
Example 11: testMasterRestartAfterEnablingNodeIsCreated
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test (timeout=60000)
public void testMasterRestartAfterEnablingNodeIsCreated() throws Exception {
  byte[] tableName = Bytes.toBytes("testMasterRestartAfterEnablingNodeIsCreated");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  final HRegionInfo[] hRegionInfos =
      new HRegionInfo[] { new HRegionInfo(desc.getName(), null, null) };
  CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(),
      m.getServerManager(), desc, cluster.getConfiguration(), hRegionInfos,
      m.getCatalogTracker(), m.getAssignmentManager());
  throwException = true;
  handler.process();
  abortAndStartNewMaster(cluster);
  assertTrue(cluster.getLiveMasterThreads().size() == 1);
}
Example 12: hbaseOperation
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
private HBaseTestingUtility hbaseOperation() throws Exception {
  HBaseTestingUtility hbaseTestingUtility = new HBaseTestingUtility();
  /*
   * # fsOwner's name is Benedict Jin, will throw exception: Illegal character in path at index 42
   * hbaseTestingUtility.getTestFileSystem().setOwner(new Path(BASE_PATH.concat("/owner")), "Benedict Jin", "supergroup");
   */
  MiniHBaseCluster hbaseCluster = hbaseTestingUtility.startMiniCluster();
  hbaseTestingUtility.createTable(Bytes.toBytes(TABLE_NAME), Bytes.toBytes("context"));
  hbaseTestingUtility.deleteTable(Bytes.toBytes(TABLE_NAME));
  Configuration config = hbaseCluster.getConf();
  Connection conn = ConnectionFactory.createConnection(config);
  HBaseAdmin hbaseAdmin = new HBaseAdmin(conn);
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
  // HBase rejects a table with no column families, so add one before createTable()
  desc.addFamily(new HColumnDescriptor("context"));
  hbaseAdmin.createTable(desc);
  hbaseAdmin.close();
  return hbaseTestingUtility;
}
Example 13: testEnabledIndexTableShouldBeDisabledIfUserTableDisabledAndMasterRestarted
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test(timeout = 180000)
public void testEnabledIndexTableShouldBeDisabledIfUserTableDisabledAndMasterRestarted()
    throws Exception {
  String tableName = "testEnabledIndexTableDisabledIfUserTableDisabledAndMasterRestarted";
  String indexTableName = IndexUtils.getIndexTableName(tableName);
  HTableDescriptor iHtd =
      TestUtils.createIndexedHTableDescriptor(tableName, "cf", "index_name", "cf", "cq");
  admin.createTable(iHtd);
  admin.disableTable(tableName);
  admin.enableTable(indexTableName);
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  cluster.abortMaster(0);
  cluster.startMaster();
  cluster.waitOnMaster(0);
  cluster.waitForActiveAndReadyMaster();
  Thread.sleep(1000);
  assertTrue("User table should be disabled.", admin.isTableDisabled(tableName));
  assertTrue("Index table should be disabled.", admin.isTableDisabled(indexTableName));
}
Example 14: testDisabledIndexTableShouldBeEnabledIfUserTableInEnablingAndMasterRestarted
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test(timeout = 180000)
public void testDisabledIndexTableShouldBeEnabledIfUserTableInEnablingAndMasterRestarted()
    throws Exception {
  String tableName = "testDisabledIndexTableEnabledIfUserTableInEnablingAndMasterRestarted";
  String indexTableName = IndexUtils.getIndexTableName(tableName);
  HTableDescriptor iHtd =
      TestUtils.createIndexedHTableDescriptor(tableName, "cf", "index_name", "cf", "cq");
  admin.createTable(iHtd);
  admin.disableTable(indexTableName);
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  master.getAssignmentManager().getZKTable().setEnablingTable(TableName.valueOf(tableName));
  cluster.abortMaster(0);
  cluster.startMaster();
  cluster.waitOnMaster(0);
  cluster.waitForActiveAndReadyMaster();
  Thread.sleep(1000);
  assertTrue("User table should be enabled.", admin.isTableEnabled(tableName));
  assertTrue("Index table should be enabled.", admin.isTableEnabled(indexTableName));
}
Example 15: testStarted
import org.apache.hadoop.hbase.MiniHBaseCluster; // import the required package/class
@Test
public void testStarted() throws Exception {
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  HMaster master = cluster.getMaster();
  assertTrue("Master should be active", master.isActiveMaster());
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  assertNotNull("CoprocessorHost should not be null", host);
  CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
      CPMasterObserver.class.getName());
  assertNotNull("CPMasterObserver coprocessor not found or not installed!", cp);
  // check basic lifecycle
  assertTrue("MasterObserver should have been started", cp.wasStarted());
  assertTrue("preMasterInitialization() hook should have been called",
      cp.wasMasterInitializationCalled());
  assertTrue("postStartMaster() hook should have been called",
      cp.wasStartMasterCalled());
}