This article collects typical usage examples of the Java method org.apache.hadoop.hbase.zookeeper.ZKAssign.blockUntilNoRIT. If you are wondering how ZKAssign.blockUntilNoRIT is used in Java, or are looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples of the class this method belongs to, org.apache.hadoop.hbase.zookeeper.ZKAssign.
The following presents 15 code examples of the ZKAssign.blockUntilNoRIT method, sorted by popularity by default.
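Before the examples, a minimal sketch of the pattern they all share may be useful: after some cluster operation (table creation, split, master restart), obtain the test cluster's ZooKeeperWatcher and block until ZooKeeper reports no regions in transition (RIT). This is only an illustrative sketch assuming a running HBaseTestingUtility mini cluster on an HBase version that still ships ZKAssign; the class and method names here are placeholders, not part of the examples below.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

// Hypothetical helper class for illustration only.
public class BlockUntilNoRitSketch {
  // Assumed shared test utility; in the real examples it is usually a class field.
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  static void waitForAssignmentsToSettle() throws Exception {
    // Watcher connected to the mini cluster's ZooKeeper ensemble.
    ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
    // Blocks until the region-in-transition znodes have no children, i.e. until
    // ZooKeeper-based assignment reports no regions in transition.
    ZKAssign.blockUntilNoRIT(zkw);
  }
}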
Example 1: createRegions
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
private MiniHBaseCluster createRegions(String tableName)
    throws InterruptedException, ZooKeeperConnectionException, IOException,
    KeeperException {
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  log("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  zkw = new ZooKeeperWatcher(conf, "testOpenedRegionHandler", null);
  // Create a table with regions
  byte[] table = Bytes.toBytes(tableName);
  byte[] family = Bytes.toBytes("family");
  TEST_UTIL.createTable(table, family);
  // wait till the regions are online
  log("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
  return cluster;
}
Example 2: setEncodingConf
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
private void setEncodingConf(DataBlockEncoding encoding,
    boolean onlineChange) throws Exception {
  LOG.debug("Setting CF encoding to " + encoding + " (ordinal="
      + encoding.ordinal() + "), onlineChange=" + onlineChange);
  hcd.setDataBlockEncoding(encoding);
  try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
    if (!onlineChange) {
      admin.disableTable(tableName);
    }
    admin.modifyColumn(tableName, hcd);
    if (!onlineChange) {
      admin.enableTable(tableName);
    }
  }
  // This is a unit test, not integration test. So let's
  // wait for regions out of transition. Otherwise, for online
  // encoding change, verification phase may be flaky because
  // regions could be still in transition.
  ZKAssign.blockUntilNoRIT(TEST_UTIL.getZooKeeperWatcher());
}
Example 3: setEncodingConf
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
private void setEncodingConf(DataBlockEncoding encoding,
    boolean onlineChange) throws Exception {
  LOG.debug("Setting CF encoding to " + encoding + " (ordinal="
      + encoding.ordinal() + "), onlineChange=" + onlineChange);
  hcd.setDataBlockEncoding(encoding);
  if (!onlineChange) {
    admin.disableTable(tableName);
  }
  admin.modifyColumn(tableName, hcd);
  if (!onlineChange) {
    admin.enableTable(tableName);
  }
  // This is a unit test, not integration test. So let's
  // wait for regions out of transition. Otherwise, for online
  // encoding change, verification phase may be flaky because
  // regions could be still in transition.
  ZKAssign.blockUntilNoRIT(TEST_UTIL.getZooKeeperWatcher());
}
Example 4: testOpenedRegionHandlerOnMasterRestart
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
@Test
public void testOpenedRegionHandlerOnMasterRestart() throws Exception {
  // Start the cluster
  log("Starting cluster");
  conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.assignment.usezk", true);
  resetConf = conf;
  TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  String tableName = "testOpenedRegionHandlerOnMasterRestart";
  MiniHBaseCluster cluster = createRegions(tableName);
  abortMaster(cluster);
  HRegionServer regionServer = cluster.getRegionServer(0);
  Region region = getRegionBeingServed(cluster, regionServer);
  // forcefully move a region to OPENED state in zk
  // Create a ZKW to use in the test
  zkw = HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
      region, regionServer.getServerName());
  // Start up a new master
  log("Starting up a new master");
  cluster.startMaster().getMaster();
  log("Waiting for master to be ready");
  cluster.waitForActiveAndReadyMaster();
  log("Master is ready");
  // Failover should be completed, now wait for no RIT
  log("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
}
Example 5: testPreCreateShouldNotBeSuccessfulIfIndicesAreNotSameAtLength
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
@Test(timeout = 180000)
public void testPreCreateShouldNotBeSuccessfulIfIndicesAreNotSameAtLength() throws IOException,
    KeeperException, InterruptedException {
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
  String userTableName = "testNotConsisIndex2";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd = new HColumnDescriptor("col");
  IndexSpecification iSpec1 = new IndexSpecification("Index1");
  iSpec1.addIndexColumn(hcd, "q1", ValueType.String, 10);
  iSpec1.addIndexColumn(hcd, "q2", ValueType.String, 4);
  ihtd.addFamily(hcd);
  TableIndices indices = new TableIndices();
  indices.addIndex(iSpec1);
  IndexSpecification iSpec2 = new IndexSpecification("Index2");
  iSpec2.addIndexColumn(hcd, "q3", ValueType.String, 10);
  iSpec2.addIndexColumn(hcd, "q2", ValueType.String, 10);
  indices.addIndex(iSpec2);
  ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
  boolean returnVal = false;
  try {
    admin.createTable(ihtd);
    fail("Exception should be thrown");
  } catch (IOException e) {
    returnVal = true;
  }
  Assert.assertTrue(returnVal);
  ZKAssign.blockUntilNoRIT(zkw);
}
Example 6: testPreCreateShouldNotBeSuccessfulIfIndicesAreNotSame
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
@Test(timeout = 180000)
public void testPreCreateShouldNotBeSuccessfulIfIndicesAreNotSame() throws IOException,
    KeeperException, InterruptedException {
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
  String userTableName = "testNotConsisIndex1";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd = new HColumnDescriptor("col");
  IndexSpecification iSpec1 = new IndexSpecification("Index1");
  iSpec1.addIndexColumn(hcd, "q1", ValueType.String, 10);
  ihtd.addFamily(hcd);
  TableIndices indices = new TableIndices();
  indices.addIndex(iSpec1);
  IndexSpecification iSpec2 = new IndexSpecification("Index2");
  iSpec2.addIndexColumn(hcd, "q1", ValueType.Int, 10);
  indices.addIndex(iSpec2);
  ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
  boolean returnVal = false;
  try {
    admin.createTable(ihtd);
    fail("Exception should be thrown");
  } catch (IOException e) {
    returnVal = true;
  }
  Assert.assertTrue(returnVal);
  ZKAssign.blockUntilNoRIT(zkw);
}
Example 7: testIndexManagerWithSplitTransactions
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
@Test(timeout = 180000)
public void testIndexManagerWithSplitTransactions() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  ZooKeeperWatcher zkw = UTIL.getZooKeeperWatcher(UTIL);
  conf.setBoolean("hbase.use.secondary.index", true);
  String userTableName = "testIndexManagerWithSplitTransactions";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd = new HColumnDescriptor("col1");
  ihtd.addFamily(hcd);
  IndexSpecification iSpec = new IndexSpecification("Index1");
  iSpec.addIndexColumn(hcd, "ql", ValueType.String, 10);
  TableIndices indices = new TableIndices();
  indices.addIndex(iSpec);
  ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
  admin.createTable(ihtd);
  IndexManager manager = IndexManager.getInstance();
  int count = manager.getTableRegionCount(userTableName);
  Assert.assertEquals(1, count);
  HTable table = new HTable(conf, userTableName);
  Put p = null;
  for (int i = 0; i < 10; i++) {
    p = new Put(Bytes.toBytes("row" + i));
    p.add(Bytes.toBytes("col1"), Bytes.toBytes("ql"), Bytes.toBytes("test_val"));
    table.put(p);
  }
  admin.split(userTableName, "row5");
  Threads.sleep(10000);
  ZKAssign.blockUntilNoRIT(zkw);
  UTIL.waitUntilAllRegionsAssigned(TableName.valueOf(userTableName));
  count = manager.getTableRegionCount(userTableName);
  Assert.assertEquals(2, count);
}
Example 8: testRegionAssignmentAfterMasterRecoveryDueToZKExpiry
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
/**
 * Tests that the master does not call retainAssignment after recovery from expired zookeeper
 * session. Without the HBASE-6046 fix master always tries to assign all the user regions by
 * calling retainAssignment.
 */
@Test(timeout = 300000)
public void testRegionAssignmentAfterMasterRecoveryDueToZKExpiry() throws Exception {
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  cluster.startRegionServer();
  cluster.waitForActiveAndReadyMaster(10000);
  HMaster m = cluster.getMaster();
  ZooKeeperWatcher zkw = m.getZooKeeper();
  int expectedNumOfListeners = zkw.getNumberOfListeners();
  // now the cluster is up. So assign some regions.
  Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  try {
    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"),
        Bytes.toBytes("c"), Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
        Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"), Bytes.toBytes("j") };
    String tableName = "testRegionAssignmentAfterMasterRecoveryDueToZKExpiry";
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(htd, SPLIT_KEYS);
    ZooKeeperWatcher zooKeeperWatcher = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
    ZKAssign.blockUntilNoRIT(zooKeeperWatcher);
    m.getZooKeeper().close();
    MockLoadBalancer.retainAssignCalled = false;
    m.abort("Test recovery from zk session expired",
        new KeeperException.SessionExpiredException());
    assertTrue(m.isStopped()); // Master doesn't recover any more
    // The recovered master should not call retainAssignment, as it is not a
    // clean startup.
    assertFalse("Retain assignment should not be called", MockLoadBalancer.retainAssignCalled);
    // number of listeners should be same as the value before master aborted
    // wait for new master is initialized
    cluster.waitForActiveAndReadyMaster(120000);
    assertEquals(expectedNumOfListeners, zkw.getNumberOfListeners());
  } finally {
    admin.close();
  }
}
Example 9: testOpenedRegionHandlerOnMasterRestart
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
@Test
public void testOpenedRegionHandlerOnMasterRestart() throws Exception {
  // Start the cluster
  log("Starting cluster");
  conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.assignment.usezk", true);
  resetConf = conf;
  TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  String tableName = "testOpenedRegionHandlerOnMasterRestart";
  MiniHBaseCluster cluster = createRegions(tableName);
  abortMaster(cluster);
  HRegionServer regionServer = cluster.getRegionServer(0);
  HRegion region = getRegionBeingServed(cluster, regionServer);
  // forcefully move a region to OPENED state in zk
  // Create a ZKW to use in the test
  zkw = HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
      region, regionServer.getServerName());
  // Start up a new master
  log("Starting up a new master");
  cluster.startMaster().getMaster();
  log("Waiting for master to be ready");
  cluster.waitForActiveAndReadyMaster();
  log("Master is ready");
  // Failover should be completed, now wait for no RIT
  log("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
}
Example 10: blockUntilNoRIT
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
private void blockUntilNoRIT(ZooKeeperWatcher zkw, HMaster master)
    throws KeeperException, InterruptedException {
  ZKAssign.blockUntilNoRIT(zkw);
  master.assignmentManager.waitUntilNoRegionsInTransition(60000);
}
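Examples 10 and 11 show the same private helper: the ZooKeeper-side wait is paired with the master's in-memory wait, because ZKAssign.blockUntilNoRIT only checks the region-in-transition znodes, while the AssignmentManager may still be draining its own state. A hedged usage sketch of such a helper, assuming a test that already holds a running MiniHBaseCluster in a variable named cluster, might look like this:

// Assumed context: cluster is a started MiniHBaseCluster from the test setup.
HMaster master = cluster.getMaster();
ZooKeeperWatcher zkw = master.getZooKeeper();
// Wait for both the RIT znodes and the master's in-memory assignment state to settle.
blockUntilNoRIT(zkw, master);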
Example 11: blockUntilNoRIT
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
private void blockUntilNoRIT(ZooKeeperWatcher zkw, HMaster master)
    throws KeeperException, InterruptedException {
  ZKAssign.blockUntilNoRIT(zkw);
  master.assignmentManager.waitUntilNoRegionsInTransition(60000);
}
Example 12: testShouldCheckMasterFailOverWhenMETAIsInOpenedState
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
@Test (timeout=180000)
public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
    throws Exception {
  LOG.info("Starting testShouldCheckMasterFailOverWhenMETAIsInOpenedState");
  final int NUM_MASTERS = 1;
  final int NUM_RS = 2;
  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.master.info.port", -1);
  conf.setBoolean("hbase.assignment.usezk", true);
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  // Find regionserver carrying meta.
  List<RegionServerThread> regionServerThreads =
      cluster.getRegionServerThreads();
  Region metaRegion = null;
  HRegionServer metaRegionServer = null;
  for (RegionServerThread regionServerThread : regionServerThreads) {
    HRegionServer regionServer = regionServerThread.getRegionServer();
    metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
    regionServer.abort("");
    if (null != metaRegion) {
      metaRegionServer = regionServer;
      break;
    }
  }
  TEST_UTIL.shutdownMiniHBaseCluster();
  // Create a ZKW to use in the test
  ZooKeeperWatcher zkw =
      HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
          metaRegion, metaRegionServer.getServerName());
  LOG.info("Starting cluster for second time");
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, NUM_RS);
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  while (!master.isInitialized()) {
    Thread.sleep(100);
  }
  // Failover should be completed, now wait for no RIT
  log("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
  zkw.close();
  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}
Example 13: testMetaRebuildOverlapFail
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
@Test(timeout = 120000)
public void testMetaRebuildOverlapFail() throws Exception {
  // Add a new .regioninfo meta entry in hdfs
  byte[] startKey = splits[0];
  byte[] endKey = splits[2];
  createRegion(conf, htbl, startKey, endKey);
  wipeOutMeta();
  // is meta really messed up?
  assertEquals(1, scanMeta());
  assertErrors(doFsck(conf, false),
      new ERROR_CODE[] {
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
  // Note, would like to check # of tables, but this takes a while to time
  // out.
  // shutdown the minicluster
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.shutdownMiniZKCluster();
  // attempt to rebuild meta table from scratch
  HBaseFsck fsck = new HBaseFsck(conf);
  assertFalse(fsck.rebuildMeta(false));
  Multimap<byte[], HbckInfo> problems = fsck.getOverlapGroups(table);
  assertEquals(1, problems.keySet().size());
  assertEquals(3, problems.size());
  // bring up the minicluster
  TEST_UTIL.startMiniZKCluster(); // tables seem enabled by default
  TEST_UTIL.restartHBaseCluster(3);
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
  LOG.info("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
  LOG.info("No more RIT in ZK, now doing final test verification");
  int tries = 60;
  while (TEST_UTIL.getHBaseCluster()
      .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition().size() > 0 &&
      tries-- > 0) {
    LOG.info("Waiting for RIT: " + TEST_UTIL.getHBaseCluster()
        .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition());
    Thread.sleep(1000);
  }
  // Meta still messed up.
  assertEquals(1, scanMeta());
  HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration());
  LOG.info("Tables present after restart: " + Arrays.toString(htbls));
  // After HBASE-451 HBaseAdmin.listTables() gets table descriptors from FS,
  // so the table is still present and this should be 1.
  assertEquals(1, htbls.length);
  assertErrors(doFsck(conf, false),
      new ERROR_CODE[] {
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
}
Example 14: testMetaRebuildHoleFail
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
@Test(timeout = 120000)
public void testMetaRebuildHoleFail() throws Exception {
  // Fully remove a meta entry and hdfs region
  byte[] startKey = splits[1];
  byte[] endKey = splits[2];
  deleteRegion(conf, htbl, startKey, endKey);
  wipeOutMeta();
  // is meta really messed up?
  assertEquals(1, scanMeta());
  assertErrors(doFsck(conf, false), new ERROR_CODE[] {
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
  // Note, would like to check # of tables, but this takes a while to time
  // out.
  // shutdown the minicluster
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.shutdownMiniZKCluster();
  // attempt to rebuild meta table from scratch
  HBaseFsck fsck = new HBaseFsck(conf);
  assertFalse(fsck.rebuildMeta(false));
  fsck.close();
  // bring up the minicluster
  TEST_UTIL.startMiniZKCluster(); // tables seem enabled by default
  TEST_UTIL.restartHBaseCluster(3);
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
  LOG.info("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
  LOG.info("No more RIT in ZK, now doing final test verification");
  int tries = 60;
  while (TEST_UTIL.getHBaseCluster()
      .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition().size() > 0 &&
      tries-- > 0) {
    LOG.info("Waiting for RIT: " + TEST_UTIL.getHBaseCluster()
        .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition());
    Thread.sleep(1000);
  }
  // Meta still messed up.
  assertEquals(1, scanMeta());
  HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration());
  LOG.info("Tables present after restart: " + Arrays.toString(htbls));
  // After HBASE-451 HBaseAdmin.listTables() gets table descriptors from FS,
  // so the table is still present and this should be 1.
  assertEquals(1, htbls.length);
  assertErrors(doFsck(conf, false), new ERROR_CODE[] {
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
}
Example 15: testMetaRebuild
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class the method depends on
@Test(timeout = 120000)
public void testMetaRebuild() throws Exception {
  wipeOutMeta();
  // is meta really messed up?
  assertEquals(1, scanMeta());
  assertErrors(doFsck(conf, false),
      new ERROR_CODE[] {
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
          ERROR_CODE.NOT_IN_META_OR_DEPLOYED});
  // Note, would like to check # of tables, but this takes a while to time
  // out.
  // shutdown the minicluster
  TEST_UTIL.shutdownMiniHBaseCluster();
  TEST_UTIL.shutdownMiniZKCluster();
  HConnectionManager.deleteConnection(conf);
  // rebuild meta table from scratch
  HBaseFsck fsck = new HBaseFsck(conf);
  assertTrue(fsck.rebuildMeta(false));
  // bring up the minicluster
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.restartHBaseCluster(3);
  TEST_UTIL.getHBaseAdmin().enableTable(table);
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
  LOG.info("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);
  LOG.info("No more RIT in ZK, now doing final test verification");
  // everything is good again.
  assertEquals(5, scanMeta());
  HTableDescriptor[] htbls = TEST_UTIL.getHBaseAdmin().listTables();
  LOG.info("Tables present after restart: " + Arrays.toString(htbls));
  assertEquals(1, htbls.length);
  assertErrors(doFsck(conf, false), new ERROR_CODE[] {});
  LOG.info("Table " + table + " has " + tableRowCount(conf, table)
      + " entries.");
  assertEquals(16, tableRowCount(conf, table));
}