This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.zookeeper.ZKAssign.getData. If you are wondering what ZKAssign.getData does, how to call it, or want to see it used in practice, the curated code samples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.zookeeper.ZKAssign.
The 13 code examples of ZKAssign.getData shown below are sorted by popularity by default.
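Before the individual examples, a minimal sketch of the basic call pattern may help orient the reader. It is written against the 0.96-era signature used in Example 5, where getData returns the raw znode bytes and the payload is parsed with RegionTransition.parseFrom; several of the other examples below use the earlier overload that returns a RegionTransitionData object directly. The helper class and method names here are illustrative only, and the ZooKeeperWatcher is assumed to be already constructed (for instance, the one obtained from master.getZooKeeper() in Example 1).

import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public class ZKAssignGetDataSketch {
  /**
   * Reads the unassigned znode of a region and returns its current event type,
   * or null if the znode no longer exists (i.e. the region is not in transition).
   * Hypothetical helper; ZKAssign.getData accepts either the full znode path or
   * the region's encoded name.
   */
  static EventType readRegionTransition(ZooKeeperWatcher zkw, String encodedRegionName)
      throws KeeperException, DeserializationException {
    byte[] data = ZKAssign.getData(zkw, encodedRegionName); // null if the node is gone
    if (data == null) {
      return null;
    }
    // The znode payload is a serialized RegionTransition (protobuf in 0.96+).
    return RegionTransition.parseFrom(data).getEventType();
  }
}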
Example 1: isSplitOrSplitting
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
/**
 * @param path znode path to check
 * @return True if znode is in SPLIT or SPLITTING state.
 * @throws KeeperException Can happen if the znode went away in the meantime.
 */
private boolean isSplitOrSplitting(final String path) throws KeeperException {
  boolean result = false;
  // This may fail if the SPLIT or SPLITTING znode gets cleaned up before we
  // can get data from it.
  RegionTransitionData data = ZKAssign.getData(master.getZooKeeper(), path);
  EventType evt = data.getEventType();
  switch (evt) {
    case RS_ZK_REGION_SPLIT:
    case RS_ZK_REGION_SPLITTING:
      result = true;
      break;
    default:
      break;
  }
  return result;
}
Example 2: OpenRegion
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
private void OpenRegion(Server server, RegionServerServices rss,
    HTableDescriptor htd, HRegionInfo hri)
    throws IOException, NodeExistsException, KeeperException {
  // Create the OFFLINE node, which is what the Master sets before sending the OPEN RPC
  ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
  int version = ZKAssign.transitionNodeOpening(server.getZooKeeper(), hri, server.getServerName());
  OpenRegionHandler openHandler = new OpenRegionHandler(server, rss, hri, htd, version);
  openHandler.process();
  // Read the znode back; at this point it should be in RS_ZK_REGION_OPENED state
  RegionTransitionData data = ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName());
  // Delete the node, which is what the Master does after the region is opened
  ZKAssign.deleteNode(server.getZooKeeper(), hri.getEncodedName(),
      EventType.RS_ZK_REGION_OPENED);
}
Example 3: testFailedOpenRegion
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
@Test
public void testFailedOpenRegion() throws Exception {
  Server server = new MockServer(HTU);
  RegionServerServices rsServices = new MockRegionServerServices();
  // Create it OFFLINE, which is what it expects
  ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName());
  ZKAssign.transitionNodeOpening(server.getZooKeeper(), TEST_HRI, server.getServerName());
  // Create the handler
  OpenRegionHandler handler =
      new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD) {
        @Override
        HRegion openRegion() {
          // Fake failure of opening a region due to an IOE, which is caught
          return null;
        }
      };
  handler.process();
  // Handler should have transitioned it to FAILED_OPEN
  RegionTransitionData data =
      ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName());
  assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, data.getEventType());
}
Example 4: testFailedUpdateMeta
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
@Test
public void testFailedUpdateMeta() throws Exception {
  Server server = new MockServer(HTU);
  RegionServerServices rsServices = new MockRegionServerServices();
  // Create it OFFLINE, which is what it expects
  ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName());
  ZKAssign.transitionNodeOpening(server.getZooKeeper(), TEST_HRI, server.getServerName());
  // Create the handler
  OpenRegionHandler handler =
      new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD) {
        @Override
        boolean updateMeta(final HRegion r) {
          // Fake failure of updating META
          return false;
        }
      };
  handler.process();
  // Handler should have transitioned it to FAILED_OPEN
  RegionTransitionData data =
      ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName());
  assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, data.getEventType());
}
Example 5: isSplitOrSplittingOrMergedOrMerging
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
/**
 * @param path znode path to check
 * @return True if znode is in SPLIT or SPLITTING or MERGED or MERGING state.
 * @throws KeeperException Can happen if the znode went away in the meantime.
 * @throws DeserializationException if the znode data cannot be parsed
 */
private boolean isSplitOrSplittingOrMergedOrMerging(final String path)
    throws KeeperException, DeserializationException {
  boolean result = false;
  // This may fail if the SPLIT or SPLITTING or MERGED or MERGING znode gets
  // cleaned up before we can get data from it.
  byte [] data = ZKAssign.getData(watcher, path);
  if (data == null) {
    LOG.info("Node " + path + " is gone");
    return false;
  }
  RegionTransition rt = RegionTransition.parseFrom(data);
  switch (rt.getEventType()) {
    case RS_ZK_REQUEST_REGION_SPLIT:
    case RS_ZK_REGION_SPLIT:
    case RS_ZK_REGION_SPLITTING:
    case RS_ZK_REQUEST_REGION_MERGE:
    case RS_ZK_REGION_MERGED:
    case RS_ZK_REGION_MERGING:
      result = true;
      break;
    default:
      LOG.info("Node " + path + " is in " + rt.getEventType());
      break;
  }
  return result;
}
Example 6: isCarryingRegion
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
/**
 * Check if the shutdown server carries the specific region.
 * We have a bunch of places that store the region location, and those
 * values aren't always consistent because notification is delayed.
 * The location from the zookeeper unassigned node has the most recent data,
 * but the node could be deleted once the region is opened by the AM.
 * The AM's info could be stale if OpenedRegionHandler processing hasn't
 * finished yet when the server shutdown occurs.
 * @return whether the serverName currently hosts the region
 */
public boolean isCarryingRegion(ServerName serverName, HRegionInfo hri) {
  RegionTransitionData data = null;
  try {
    data = ZKAssign.getData(master.getZooKeeper(), hri.getEncodedName());
  } catch (KeeperException e) {
    master.abort("Unexpected ZK exception reading unassigned node for region="
        + hri.getEncodedName(), e);
  }
  ServerName addressFromZK = (data != null && data.getOrigin() != null) ?
      data.getOrigin() : null;
  if (addressFromZK != null) {
    // if we get something from ZK, we will use that data
    boolean matchZK = addressFromZK.equals(serverName);
    LOG.debug("based on ZK, current region=" + hri.getRegionNameAsString() +
        " is on server=" + addressFromZK +
        " server being checked: " + serverName);
    return matchZK;
  }
  ServerName addressFromAM = getRegionServerOfRegion(hri);
  boolean matchAM = (addressFromAM != null &&
      addressFromAM.equals(serverName));
  LOG.debug("based on AM, current region=" + hri.getRegionNameAsString() +
      " is on server=" + (addressFromAM != null ? addressFromAM : "null") +
      " server being checked: " + serverName);
  return matchAM;
}
Example 7: testZKClosingNodeVersionMismatch
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
/**
 * Test if close region can handle a ZK closing node version mismatch
 * @throws IOException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test public void testZKClosingNodeVersionMismatch()
    throws IOException, NodeExistsException, KeeperException {
  final Server server = new MockServer(HTU);
  final MockRegionServerServices rss = new MockRegionServerServices();
  rss.setFileSystem(HTU.getTestFileSystem());
  HTableDescriptor htd = TEST_HTD;
  final HRegionInfo hri = TEST_HRI;
  // open a region first so that it can be closed later
  OpenRegion(server, rss, htd, hri);
  // close the region
  // Create it CLOSING, which is what the Master sets before sending the CLOSE RPC
  int versionOfClosingNode = ZKAssign.createNodeClosing(server.getZooKeeper(),
      hri, server.getServerName());
  // The CloseRegionHandler will validate the expected version.
  // Given it is set to the invalid versionOfClosingNode+1,
  // the znode should stay in M_ZK_REGION_CLOSING.
  CloseRegionHandler handler =
      new CloseRegionHandler(server, rss, hri, false, true,
          versionOfClosingNode + 1);
  handler.process();
  // Handler should remain in M_ZK_REGION_CLOSING
  RegionTransitionData data =
      ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName());
  assertTrue(EventType.M_ZK_REGION_CLOSING == data.getEventType());
}
Example 8: testCloseRegion
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
/**
 * Test if the region can be closed properly
 * @throws IOException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test public void testCloseRegion()
    throws IOException, NodeExistsException, KeeperException {
  final Server server = new MockServer(HTU);
  final MockRegionServerServices rss = new MockRegionServerServices();
  rss.setFileSystem(HTU.getTestFileSystem());
  HTableDescriptor htd = TEST_HTD;
  HRegionInfo hri = TEST_HRI;
  // open a region first so that it can be closed later
  OpenRegion(server, rss, htd, hri);
  // close the region
  // Create it CLOSING, which is what the Master sets before sending the CLOSE RPC
  int versionOfClosingNode = ZKAssign.createNodeClosing(server.getZooKeeper(),
      hri, server.getServerName());
  // The CloseRegionHandler will validate the expected version.
  // Given it is set to the correct versionOfClosingNode,
  // the znode should end up in RS_ZK_REGION_CLOSED.
  CloseRegionHandler handler =
      new CloseRegionHandler(server, rss, hri, false, true,
          versionOfClosingNode);
  handler.process();
  // Handler should have transitioned it to RS_ZK_REGION_CLOSED
  RegionTransitionData data =
      ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName());
  assertTrue(EventType.RS_ZK_REGION_CLOSED == data.getEventType());
}
Example 9: testTransitionToFailedOpenEvenIfCleanupFails
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
@Test
public void testTransitionToFailedOpenEvenIfCleanupFails() throws Exception {
  MiniHBaseCluster cluster = HTU.getHBaseCluster();
  HRegionServer server =
      cluster.getLiveRegionServerThreads().get(0).getRegionServer();
  // Create it OFFLINE, which is what it expects
  ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName());
  ZKAssign.transitionNodeOpening(server.getZooKeeper(), TEST_HRI, server.getServerName());
  // Create the handler
  OpenRegionHandler handler = new OpenRegionHandler(server, server, TEST_HRI, TEST_HTD) {
    @Override
    boolean updateMeta(HRegion r) {
      return false;
    }

    @Override
    void cleanupFailedOpen(HRegion region) throws IOException {
      throw new IOException("FileSystem got closed.");
    }
  };
  ((TestOpenRegionHandlerRegionServer) server).addRegionsInTransition(TEST_HRI, "OPEN");
  try {
    handler.process();
  } catch (Exception e) {
    // Ignore the IOException that we have thrown from cleanupFailedOpen
  }
  RegionTransitionData data =
      ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName());
  assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, data.getEventType());
}
Example 10: processDeadServersAndRecoverLostRegions
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
/**
 * Processes the list of dead servers from the result of a META scan, plus the
 * regions in RIT.
 * <p>
 * This is used on failover to recover the lost regions that belonged to
 * RegionServers which failed while there was no active master, or regions
 * that were in RIT.
 * <p>
 * @param deadServers
 *          The list of dead servers which failed while there was no active
 *          master. Can be null.
 * @param nodes
 *          The regions in RIT
 * @throws IOException
 * @throws KeeperException
 */
private void processDeadServersAndRecoverLostRegions(
    Map<ServerName, List<Pair<HRegionInfo, Result>>> deadServers,
    List<String> nodes) throws IOException, KeeperException {
  if (null != deadServers) {
    Set<ServerName> actualDeadServers = this.serverManager.getDeadServers();
    for (Map.Entry<ServerName, List<Pair<HRegionInfo, Result>>> deadServer :
        deadServers.entrySet()) {
      // skip regions of dead servers because SSH will process regions during rs expiration.
      // see HBASE-5916
      if (actualDeadServers.contains(deadServer.getKey())) {
        for (Pair<HRegionInfo, Result> deadRegion : deadServer.getValue()) {
          HRegionInfo hri = deadRegion.getFirst();
          // Delete znode of region in transition if table is disabled or disabling. If a region
          // server went down during master initialization then SSH cannot handle the regions of
          // partially disabled tables because in-memory region state information may not be
          // available with the master.
          deleteNodeAndOfflineRegion(hri);
          nodes.remove(deadRegion.getFirst().getEncodedName());
        }
        continue;
      }
      List<Pair<HRegionInfo, Result>> regions = deadServer.getValue();
      for (Pair<HRegionInfo, Result> region : regions) {
        HRegionInfo regionInfo = region.getFirst();
        Result result = region.getSecond();
        // If region was in transition (was in zk) force it offline for
        // reassign
        try {
          RegionTransitionData data = ZKAssign.getData(watcher,
              regionInfo.getEncodedName());
          // If the zk node of this region has been updated by a live server,
          // we consider that this region is being handled.
          // So we should skip it and process it in
          // processRegionsInTransition.
          if (data != null && data.getOrigin() != null &&
              serverManager.isServerOnline(data.getOrigin())) {
            LOG.info("The region " + regionInfo.getEncodedName()
                + " is being handled on " + data.getOrigin());
            continue;
          }
          // Process with existing RS shutdown code
          boolean assign = ServerShutdownHandler.processDeadRegion(
              regionInfo, result, this, this.catalogTracker);
          if (assign) {
            ZKAssign.createOrForceNodeOffline(watcher, regionInfo,
                master.getServerName());
            if (!nodes.contains(regionInfo.getEncodedName())) {
              nodes.add(regionInfo.getEncodedName());
            }
          }
        } catch (KeeperException.NoNodeException nne) {
          // This is fine
        }
      }
    }
  }
  if (!nodes.isEmpty()) {
    for (String encodedRegionName : nodes) {
      processRegionInTransition(encodedRegionName, null, deadServers);
    }
  }
}
Example 11: testRegionShouldNotBeDeployed
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
/**
 * The region should not be deployed when the table is disabled.
 */
@Test
public void testRegionShouldNotBeDeployed() throws Exception {
  String table = "tableRegionShouldNotBeDeployed";
  try {
    LOG.info("Starting testRegionShouldNotBeDeployed.");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    assertTrue(cluster.waitForActiveAndReadyMaster());
    // Create a ZKW to use in the test
    ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = filesystem.makeQualified(new Path(conf
        .get(HConstants.HBASE_DIR)));
    byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
        Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
    HTableDescriptor htdDisabled = new HTableDescriptor(Bytes.toBytes(table));
    htdDisabled.addFamily(new HColumnDescriptor(FAM));
    // Write the .tableinfo
    FSTableDescriptors
        .createTableDescriptor(filesystem, rootdir, htdDisabled);
    List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
        TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
    // Let's just assign everything to the first RS
    HRegionServer hrs = cluster.getRegionServer(0);
    ServerName serverName = hrs.getServerName();
    // create region files
    TEST_UTIL.getHBaseAdmin().disableTable(table);
    TEST_UTIL.getHBaseAdmin().enableTable(table);
    // A region of the disabled table was opened on the RS
    TEST_UTIL.getHBaseAdmin().disableTable(table);
    HRegionInfo region = disabledRegions.remove(0);
    ZKAssign.createNodeOffline(zkw, region, serverName);
    hrs.openRegion(region);
    int iTimes = 0;
    while (true) {
      RegionTransitionData rtd = ZKAssign.getData(zkw,
          region.getEncodedName());
      if (rtd != null && rtd.getEventType() == EventType.RS_ZK_REGION_OPENED) {
        break;
      }
      Thread.sleep(100);
      iTimes++;
      if (iTimes >= REGION_ONLINE_TIMEOUT) {
        break;
      }
    }
    assertTrue(iTimes < REGION_ONLINE_TIMEOUT);
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.SHOULD_NOT_BE_DEPLOYED });
    // fix this fault
    doFsck(conf, true);
    // check result
    assertNoErrors(doFsck(conf, false));
  } finally {
    TEST_UTIL.getHBaseAdmin().enableTable(table);
    deleteTable(table);
  }
}
Example 12: testRSSplitEphemeralsDisappearButDaughtersAreOnlinedAfterShutdownHandling
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
/**
 * A test that intentionally has the master fail the processing of the split message.
 * Tests that the regionserver's split ephemeral node gets cleaned up if the
 * regionserver crashes and that, after we process the server shutdown, the
 * daughters are online.
 * @throws IOException
 * @throws InterruptedException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test (timeout = 300000) public void testRSSplitEphemeralsDisappearButDaughtersAreOnlinedAfterShutdownHandling()
    throws IOException, InterruptedException, NodeExistsException, KeeperException {
  final byte [] tableName = Bytes.toBytes("ephemeral");
  // Create table then get the single region for our new table.
  HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
  List<HRegion> regions = cluster.getRegions(tableName);
  HRegionInfo hri = getAndCheckSingleTableRegion(regions);
  int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
  // Turn off the balancer so it doesn't cut in and mess up our placements.
  this.admin.setBalancerRunning(false, true);
  // Turn off the meta scanner so it doesn't remove the parent on us.
  cluster.getMaster().setCatalogJanitorEnabled(false);
  try {
    // Add a bit of load to the table so it is splittable.
    TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY);
    // Get region pre-split.
    HRegionServer server = cluster.getRegionServer(tableRegionIndex);
    printOutRegions(server, "Initial regions: ");
    int regionCount = server.getOnlineRegions().size();
    // Now, before we split, set a special flag in the master, a flag that has
    // it FAIL the processing of the split.
    SplitRegionHandler.TEST_SKIP = true;
    // Now try splitting and it should work.
    split(hri, server, regionCount);
    // Get daughters
    List<HRegion> daughters = checkAndGetDaughters(tableName);
    // Assert the ephemeral node is up in zk.
    String path = ZKAssign.getNodeName(t.getConnection().getZooKeeperWatcher(),
        hri.getEncodedName());
    Stat stats =
        t.getConnection().getZooKeeperWatcher().getRecoverableZooKeeper().exists(path, false);
    LOG.info("EPHEMERAL NODE BEFORE SERVER ABORT, path=" + path + ", stats=" + stats);
    RegionTransitionData rtd =
        ZKAssign.getData(t.getConnection().getZooKeeperWatcher(),
            hri.getEncodedName());
    // State could be SPLIT or SPLITTING.
    assertTrue(rtd.getEventType().equals(EventType.RS_ZK_REGION_SPLIT) ||
        rtd.getEventType().equals(EventType.RS_ZK_REGION_SPLITTING));
    // Now crash the server
    cluster.abortRegionServer(tableRegionIndex);
    waitUntilRegionServerDead();
    awaitDaughters(tableName, daughters.size());
    // Assert daughters are online.
    regions = cluster.getRegions(tableName);
    for (HRegion r: regions) {
      assertTrue(daughters.contains(r));
    }
    // Finally assert that the ephemeral SPLIT znode was cleaned up.
    for (int i = 0; i < 100; i++) {
      // wait a bit (10s max) for the node to disappear
      stats = t.getConnection().getZooKeeperWatcher().getRecoverableZooKeeper().exists(path, false);
      if (stats == null) break;
      Thread.sleep(100);
    }
    LOG.info("EPHEMERAL NODE AFTER SERVER ABORT, path=" + path + ", stats=" + stats);
    assertTrue(stats == null);
  } finally {
    // Set this flag back.
    SplitRegionHandler.TEST_SKIP = false;
    admin.setBalancerRunning(true, false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
    t.close();
  }
}
Example 13: testMasterRestartWhenSplittingIsPartial
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the class the method depends on
/**
 * Verifies HBASE-5806: splitting is partially done and the master goes down
 * while the SPLIT node is in either SPLIT or SPLITTING state.
 * @throws IOException
 * @throws InterruptedException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test(timeout = 300000)
public void testMasterRestartWhenSplittingIsPartial()
    throws IOException, InterruptedException, NodeExistsException,
    KeeperException {
  final byte[] tableName = Bytes.toBytes("testMasterRestartWhenSplittingIsPartial");
  // Create table then get the single region for our new table.
  HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
  List<HRegion> regions = cluster.getRegions(tableName);
  HRegionInfo hri = getAndCheckSingleTableRegion(regions);
  int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);
  // Turn off the meta scanner so it doesn't remove the parent on us.
  cluster.getMaster().setCatalogJanitorEnabled(false);
  // Turn off the balancer so it doesn't cut in and mess up our placements.
  this.admin.setBalancerRunning(false, true);
  try {
    // Add a bit of load to the table so it is splittable.
    TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY);
    // Get region pre-split.
    HRegionServer server = cluster.getRegionServer(tableRegionIndex);
    printOutRegions(server, "Initial regions: ");
    int regionCount = server.getOnlineRegions().size();
    // Now, before we split, set a special flag in the master, a flag that has
    // it FAIL the processing of the split.
    SplitRegionHandler.TEST_SKIP = true;
    // Now try splitting and it should work.
    split(hri, server, regionCount);
    // Get daughters
    checkAndGetDaughters(tableName);
    // Assert the ephemeral node is up in zk.
    String path = ZKAssign.getNodeName(t.getConnection()
        .getZooKeeperWatcher(), hri.getEncodedName());
    Stat stats = t.getConnection().getZooKeeperWatcher()
        .getRecoverableZooKeeper().exists(path, false);
    LOG.info("EPHEMERAL NODE BEFORE SERVER ABORT, path=" + path + ", stats="
        + stats);
    RegionTransitionData rtd = ZKAssign.getData(t.getConnection()
        .getZooKeeperWatcher(), hri.getEncodedName());
    // State could be SPLIT or SPLITTING.
    assertTrue(rtd.getEventType().equals(EventType.RS_ZK_REGION_SPLIT)
        || rtd.getEventType().equals(EventType.RS_ZK_REGION_SPLITTING));
    // Abort and wait for the new master.
    MockMasterWithoutCatalogJanitor master = abortAndWaitForMaster();
    this.admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());
    // Update the hri to be offlined and split.
    hri.setOffline(true);
    hri.setSplit(true);
    ServerName regionServerOfRegion = master.getAssignmentManager()
        .getRegionServerOfRegion(hri);
    assertTrue(regionServerOfRegion != null);
  } finally {
    // Set this flag back.
    SplitRegionHandler.TEST_SKIP = false;
    admin.setBalancerRunning(true, false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
    t.close();
  }
}