This article collects typical usage examples of the Java method org.apache.hadoop.hbase.zookeeper.ZKAssign.deleteNodeFailSilent. If you are unsure what ZKAssign.deleteNodeFailSilent does or how to call it, the curated code examples below may help. You can also explore other usages of its enclosing class, org.apache.hadoop.hbase.zookeeper.ZKAssign.
The sections below show 10 code examples of ZKAssign.deleteNodeFailSilent, sorted by popularity by default.
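Before the examples, here is a minimal sketch of the call pattern they all share: given an already-connected ZooKeeperWatcher and the HRegionInfo of a region, delete that region's unassigned znode, ignoring the case where the node is already gone. The class and method names in this sketch (ZKAssignCleanupSketch, cleanupRegionZNode) are illustrative only and are not part of HBase; the error handling is an assumption, whereas the examples below typically abort the enclosing server instead.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public class ZKAssignCleanupSketch {
  /**
   * Removes the region-in-transition znode for the given region, if present.
   * deleteNodeFailSilent does not fail when the node is absent, so callers
   * only need to handle unexpected ZooKeeper errors.
   */
  static void cleanupRegionZNode(ZooKeeperWatcher watcher, HRegionInfo hri) {
    try {
      ZKAssign.deleteNodeFailSilent(watcher, hri);
    } catch (KeeperException ke) {
      // Hypothetical handling for this sketch; HBase code usually calls server.abort(...) here.
      throw new RuntimeException("Unexpected ZK exception deleting node " + hri, ke);
    }
  }
}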
Example 1: onRegionOpen
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class this method depends on
private void onRegionOpen(final HRegionInfo hri, final ServerName sn, long openSeqNum) {
  regionOnline(hri, sn, openSeqNum);
  if (useZKForAssignment) {
    try {
      // Delete the ZNode if exists
      ZKAssign.deleteNodeFailSilent(watcher, hri);
    } catch (KeeperException ke) {
      server.abort("Unexpected ZK exception deleting node " + hri, ke);
    }
  }
  // reset the count, if any
  failedOpenTracker.remove(hri.getEncodedName());
  if (getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
    invokeUnAssign(hri);
  }
}
Example 2: onRegionOpen
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class this method depends on
private void onRegionOpen(
    final HRegionInfo hri, final ServerName sn, long openSeqNum) {
  regionOnline(hri, sn, openSeqNum);
  if (useZKForAssignment) {
    try {
      // Delete the ZNode if exists
      ZKAssign.deleteNodeFailSilent(watcher, hri);
    } catch (KeeperException ke) {
      server.abort("Unexpected ZK exception deleting node " + hri, ke);
    }
  }
  // reset the count, if any
  failedOpenTracker.remove(hri.getEncodedName());
  if (getTableStateManager().isTableState(hri.getTable(),
      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
    invokeUnAssign(hri);
  }
}
Example 3: after
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class this method depends on
@After
public void after() throws Exception {
  // Clean the state if the test failed before cleaning the znode
  // It does not manage all bad failures, so if there are multiple failures, only
  // the first one should be looked at.
  ZKAssign.deleteNodeFailSilent(HTU.getZooKeeperWatcher(), hri);
}
Example 4: after
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class this method depends on
@After
public void after() throws Exception {
  // Clean the state if the test failed before cleaning the znode
  // It does not manage all bad failures, so if there are multiple failures, only
  // the first one should be looked at.
  ZKAssign.deleteNodeFailSilent(HTU.getZooKeeperWatcher(), hriPrimary);
}
Example 5: deleteNodeAndOfflineRegion
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class this method depends on
/**
 * Delete znode of region in transition if table is disabling/disabled and offline the region.
 * @param hri region to offline.
 */
public void deleteNodeAndOfflineRegion(HRegionInfo hri) {
  if (zkTable.isDisablingOrDisabledTable(hri.getTableNameAsString())) {
    try {
      // If table is partially disabled then delete znode if exists in any state.
      ZKAssign.deleteNodeFailSilent(this.master.getZooKeeper(), hri);
    } catch (KeeperException ke) {
      this.master.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
    }
    regionOffline(hri);
  }
}
Example 6: cleanOutCrashedServerReferences
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class this method depends on
/**
 * Clean out crashed server removing any assignments.
 * @param sn Server that went down.
 * @return list of regions in transition on this server
 */
public List<HRegionInfo> cleanOutCrashedServerReferences(final ServerName sn) {
  // Clean out any existing assignment plans for this server
  synchronized (this.regionPlans) {
    for (Iterator<Map.Entry<String, RegionPlan>> i = this.regionPlans.entrySet().iterator();
        i.hasNext();) {
      Map.Entry<String, RegionPlan> e = i.next();
      ServerName otherSn = e.getValue().getDestination();
      // The name will be null if the region is planned for a random assign.
      if (otherSn != null && otherSn.equals(sn)) {
        // Use iterator's remove else we'll get CME
        i.remove();
      }
    }
  }
  List<HRegionInfo> regions = regionStates.serverOffline(watcher, sn);
  for (Iterator<HRegionInfo> it = regions.iterator(); it.hasNext(); ) {
    HRegionInfo hri = it.next();
    String encodedName = hri.getEncodedName();
    // We need a lock on the region as we could update it
    Lock lock = locker.acquireLock(encodedName);
    try {
      RegionState regionState = regionStates.getRegionTransitionState(encodedName);
      if (regionState == null
          || (regionState.getServerName() != null && !regionState.isOnServer(sn))
          || !(regionState.isFailedClose() || regionState.isOffline()
            || regionState.isPendingOpenOrOpening())) {
        LOG.info("Skip " + regionState + " since it is not opening/failed_close"
          + " on the dead server any more: " + sn);
        it.remove();
      } else {
        try {
          // Delete the ZNode if exists
          ZKAssign.deleteNodeFailSilent(watcher, hri);
        } catch (KeeperException ke) {
          server.abort("Unexpected ZK exception deleting node " + hri, ke);
        }
        if (tableStateManager.isTableState(hri.getTable(),
            ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
          regionStates.regionOffline(hri);
          it.remove();
          continue;
        }
        // Mark the region offline and assign it again by SSH
        regionStates.updateRegionState(hri, State.OFFLINE);
      }
    } finally {
      lock.unlock();
    }
  }
  return regions;
}
Example 7: testCleanUpDaughtersNotInMetaAfterFailedSplit
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class this method depends on
@Test (timeout=180000)
public void testCleanUpDaughtersNotInMetaAfterFailedSplit() throws Exception {
  TableName table = TableName.valueOf("testCleanUpDaughtersNotInMetaAfterFailedSplit");
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
    createTable(TEST_UTIL, desc, null);
    tbl = new HTable(cluster.getConfiguration(), desc.getTableName());
    for (int i = 0; i < 5; i++) {
      Put p1 = new Put(("r" + i).getBytes());
      p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
      tbl.put(p1);
    }
    admin.flush(desc.getTableName());
    List<HRegion> regions = cluster.getRegions(desc.getTableName());
    int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
    HRegionServer regionServer = cluster.getRegionServer(serverWith);
    cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
    SplitTransactionImpl st = new SplitTransactionImpl(regions.get(0), Bytes.toBytes("r3"));
    st.prepare();
    st.stepsBeforePONR(regionServer, regionServer, false);
    AssignmentManager am = cluster.getMaster().getAssignmentManager();
    Map<String, RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
    for (RegionState state : regionsInTransition.values()) {
      am.regionOffline(state.getRegion());
    }
    ZKAssign.deleteNodeFailSilent(regionServer.getZooKeeper(), regions.get(0).getRegionInfo());
    Map<HRegionInfo, ServerName> regionsMap = new HashMap<HRegionInfo, ServerName>();
    regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
    am.assign(regionsMap);
    am.waitForAssignment(regions.get(0).getRegionInfo());
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
        ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
    // holes are separate from overlap groups
    assertEquals(0, hbck.getOverlapGroups(table).size());
    // fix hole
    assertErrors(
      doFsck(
        conf, false, true, false, false, false, false, false, false, false, false, false, null),
      new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
        ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
    // check that hole fixed
    assertNoErrors(doFsck(conf, false));
    assertEquals(5, countRows());
  } finally {
    if (tbl != null) {
      tbl.close();
      tbl = null;
    }
    cleanupTable(table);
  }
}
Example 8: processServerShutdown
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class this method depends on
/**
 * Process shutdown server removing any assignments.
 * @param sn Server that went down.
 * @return list of regions in transition on this server
 */
public List<HRegionInfo> processServerShutdown(final ServerName sn) {
  // Clean out any existing assignment plans for this server
  synchronized (this.regionPlans) {
    for (Iterator<Map.Entry<String, RegionPlan>> i =
        this.regionPlans.entrySet().iterator(); i.hasNext();) {
      Map.Entry<String, RegionPlan> e = i.next();
      ServerName otherSn = e.getValue().getDestination();
      // The name will be null if the region is planned for a random assign.
      if (otherSn != null && otherSn.equals(sn)) {
        // Use iterator's remove else we'll get CME
        i.remove();
      }
    }
  }
  List<HRegionInfo> regions = regionStates.serverOffline(watcher, sn);
  for (Iterator<HRegionInfo> it = regions.iterator(); it.hasNext(); ) {
    HRegionInfo hri = it.next();
    String encodedName = hri.getEncodedName();
    // We need a lock on the region as we could update it
    Lock lock = locker.acquireLock(encodedName);
    try {
      RegionState regionState =
        regionStates.getRegionTransitionState(encodedName);
      if (regionState == null
          || (regionState.getServerName() != null && !regionState.isOnServer(sn))
          || !(regionState.isFailedClose() || regionState.isOffline()
            || regionState.isPendingOpenOrOpening())) {
        LOG.info("Skip " + regionState + " since it is not opening/failed_close"
          + " on the dead server any more: " + sn);
        it.remove();
      } else {
        try {
          // Delete the ZNode if exists
          ZKAssign.deleteNodeFailSilent(watcher, hri);
        } catch (KeeperException ke) {
          server.abort("Unexpected ZK exception deleting node " + hri, ke);
        }
        if (tableStateManager.isTableState(hri.getTable(),
            ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
          regionStates.regionOffline(hri);
          it.remove();
          continue;
        }
        // Mark the region offline and assign it again by SSH
        regionStates.updateRegionState(hri, State.OFFLINE);
      }
    } finally {
      lock.unlock();
    }
  }
  return regions;
}
Example 9: testCleanUpDaughtersNotInMetaAfterFailedSplit
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class this method depends on
@Test (timeout=180000)
public void testCleanUpDaughtersNotInMetaAfterFailedSplit() throws Exception {
  TableName table = TableName.valueOf("testCleanUpDaughtersNotInMetaAfterFailedSplit");
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
    admin.createTable(desc);
    tbl = new HTable(cluster.getConfiguration(), desc.getTableName());
    for (int i = 0; i < 5; i++) {
      Put p1 = new Put(("r" + i).getBytes());
      p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
      tbl.put(p1);
    }
    admin.flush(desc.getTableName());
    List<HRegion> regions = cluster.getRegions(desc.getTableName());
    int serverWith = cluster.getServerWith(regions.get(0).getRegionName());
    HRegionServer regionServer = cluster.getRegionServer(serverWith);
    cluster.getServerWith(regions.get(0).getRegionName());
    SplitTransaction st = new SplitTransaction(regions.get(0), Bytes.toBytes("r3"));
    st.prepare();
    st.stepsBeforePONR(regionServer, regionServer, false);
    AssignmentManager am = cluster.getMaster().getAssignmentManager();
    Map<String, RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
    for (RegionState state : regionsInTransition.values()) {
      am.regionOffline(state.getRegion());
    }
    ZKAssign.deleteNodeFailSilent(regionServer.getZooKeeper(), regions.get(0).getRegionInfo());
    Map<HRegionInfo, ServerName> regionsMap = new HashMap<HRegionInfo, ServerName>();
    regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
    am.assign(regionsMap);
    am.waitForAssignment(regions.get(0).getRegionInfo());
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
        ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
    // holes are separate from overlap groups
    assertEquals(0, hbck.getOverlapGroups(table).size());
    // fix hole
    assertErrors(
      doFsck(
        conf, false, true, false, false, false, false, false, false, false, false, false, null),
      new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
        ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
    // check that hole fixed
    assertNoErrors(doFsck(conf, false));
    assertEquals(5, countRows());
  } finally {
    if (tbl != null) {
      tbl.close();
      tbl = null;
    }
    cleanupTable(table);
  }
}
Example 10: processServerShutdown
import org.apache.hadoop.hbase.zookeeper.ZKAssign; // import the package/class this method depends on
/**
 * Process shutdown server removing any assignments.
 * @param sn Server that went down.
 * @return list of regions in transition on this server
 */
public List<HRegionInfo> processServerShutdown(final ServerName sn) {
  // Clean out any existing assignment plans for this server
  synchronized (this.regionPlans) {
    for (Iterator<Map.Entry<String, RegionPlan>> i =
        this.regionPlans.entrySet().iterator(); i.hasNext();) {
      Map.Entry<String, RegionPlan> e = i.next();
      ServerName otherSn = e.getValue().getDestination();
      // The name will be null if the region is planned for a random assign.
      if (otherSn != null && otherSn.equals(sn)) {
        // Use iterator's remove else we'll get CME
        i.remove();
      }
    }
  }
  List<HRegionInfo> regions = regionStates.serverOffline(watcher, sn);
  for (Iterator<HRegionInfo> it = regions.iterator(); it.hasNext(); ) {
    HRegionInfo hri = it.next();
    String encodedName = hri.getEncodedName();
    // We need a lock on the region as we could update it
    Lock lock = locker.acquireLock(encodedName);
    try {
      RegionState regionState =
        regionStates.getRegionTransitionState(encodedName);
      if (regionState == null
          || (regionState.getServerName() != null && !regionState.isOnServer(sn))
          || !(regionState.isFailedClose() || regionState.isOffline()
            || regionState.isPendingOpenOrOpening())) {
        LOG.info("Skip " + regionState + " since it is not opening/failed_close"
          + " on the dead server any more: " + sn);
        it.remove();
      } else {
        try {
          // Delete the ZNode if exists
          ZKAssign.deleteNodeFailSilent(watcher, hri);
        } catch (KeeperException ke) {
          server.abort("Unexpected ZK exception deleting node " + hri, ke);
        }
        if (zkTable.isDisablingOrDisabledTable(hri.getTable())) {
          regionStates.regionOffline(hri);
          it.remove();
          continue;
        }
        // Mark the region offline and assign it again by SSH
        regionStates.updateRegionState(hri, State.OFFLINE);
      }
    } finally {
      lock.unlock();
    }
  }
  return regions;
}