This page collects typical usage examples of the Java method org.apache.hadoop.hbase.master.AssignmentManager.regionOffline. If you are unsure what AssignmentManager.regionOffline does, how to call it, or what real usages look like, the curated code samples below should help. You can also look at the enclosing class, org.apache.hadoop.hbase.master.AssignmentManager, for further context.
The following 10 code examples of AssignmentManager.regionOffline are shown, ordered by popularity by default.
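Before the examples, here is a minimal sketch of the basic call pattern, assuming a live master context; the helper name and the way the MasterServices handle and the region are obtained are placeholders for illustration, not part of the examples below.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.MasterServices;

final class RegionOfflineSketch {
  /**
   * Marks the given region offline in the master's in-memory assignment state.
   * masterServices and hri are assumed to be supplied by the surrounding master code.
   */
  static void markOffline(MasterServices masterServices, HRegionInfo hri) {
    AssignmentManager am = masterServices.getAssignmentManager();
    // After this call the master no longer considers the region assigned to any server.
    am.regionOffline(hri);
  }
}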
Example 1: forceRegionsOffline
import org.apache.hadoop.hbase.master.AssignmentManager; // import the package/class the method depends on
private void forceRegionsOffline(final List<HRegionInfo> hris) {
AssignmentManager am = this.masterServices.getAssignmentManager();
if (hris != null) {
for (HRegionInfo hri: hris) {
am.regionOffline(hri);
}
}
}
Example 2: waitRegionInTransition
import org.apache.hadoop.hbase.master.AssignmentManager; // import the package/class the method depends on
protected void waitRegionInTransition(final List<HRegionInfo> regions)
throws IOException, CoordinatedStateException {
AssignmentManager am = this.masterServices.getAssignmentManager();
RegionStates states = am.getRegionStates();
long waitTime = server.getConfiguration().
getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
for (HRegionInfo region : regions) {
long done = System.currentTimeMillis() + waitTime;
while (System.currentTimeMillis() < done) {
if (states.isRegionInState(region, State.FAILED_OPEN)) {
am.regionOffline(region);
}
if (!states.isRegionInTransition(region)) break;
try {
Thread.sleep(waitingTimeForEvents);
} catch (InterruptedException e) {
LOG.warn("Interrupted while sleeping");
throw (InterruptedIOException)new InterruptedIOException().initCause(e);
}
LOG.debug("Waiting on region to clear regions in transition; "
+ am.getRegionStates().getRegionTransitionState(region));
}
if (states.isRegionInTransition(region)) {
throw new IOException("Waited hbase.master.wait.on.region (" +
waitTime + "ms) for region to leave region " +
region.getRegionNameAsString() + " in transitions");
}
}
}
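The loop above illustrates an idiom that recurs in several of these examples (see also Examples 8 and 10): when a region is stuck in FAILED_OPEN, it is marked offline before the wait-on-transition check. A condensed sketch of just that step follows; the wrapper class and method name are made up for illustration, and the AssignmentManager and region are assumed to come from the surrounding handler.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;

final class FailedOpenCleanupSketch {
  /** Clears a region stuck in FAILED_OPEN so a wait-on-RIT loop can complete. */
  static void offlineIfFailedOpen(AssignmentManager am, HRegionInfo region) {
    RegionStates states = am.getRegionStates();
    if (states.isRegionInState(region, State.FAILED_OPEN)) {
      // A FAILED_OPEN region never leaves transition on its own; offlining it
      // removes the stuck assignment so the caller's wait loop can finish.
      am.regionOffline(region);
    }
  }
}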
Example 3: processMeta
import org.apache.hadoop.hbase.master.AssignmentManager; // import the package/class the method depends on
/**
* @param env
* @return False if we fail to assign and split logs on meta ('process').
* @throws IOException
* @throws InterruptedException
*/
private boolean processMeta(final MasterProcedureEnv env)
throws IOException {
if (LOG.isDebugEnabled()) LOG.debug("Processing hbase:meta that was on " + this.serverName);
MasterServices services = env.getMasterServices();
MasterFileSystem mfs = services.getMasterFileSystem();
AssignmentManager am = services.getAssignmentManager();
HRegionInfo metaHRI = HRegionInfo.FIRST_META_REGIONINFO;
if (this.shouldSplitWal) {
if (this.distributedLogReplay) {
prepareLogReplay(env, META_REGION_SET);
} else {
// TODO: Matteo. We BLOCK here but most important thing to be doing at this moment.
mfs.splitMetaLog(serverName);
am.getRegionStates().logSplit(metaHRI);
}
}
// Assign meta if still carrying it. Check again: region may be assigned because of RIT timeout
boolean processed = true;
boolean shouldAssignMeta = false;
AssignmentManager.ServerHostRegion rsCarryingMetaRegion = am.isCarryingMeta(serverName);
switch (rsCarryingMetaRegion) {
case HOSTING_REGION:
LOG.info("Server " + serverName + " was carrying META. Trying to assign.");
am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
shouldAssignMeta = true;
break;
case UNKNOWN:
if (!services.getMetaTableLocator().isLocationAvailable(services.getZooKeeper())) {
// the meta location as per master is null. This could happen in case when meta
// assignment in previous run failed, while meta znode has been updated to null.
// We should try to assign the meta again.
shouldAssignMeta = true;
break;
}
// fall through
case NOT_HOSTING_REGION:
LOG.info("META has been assigned to otherwhere, skip assigning.");
break;
default:
throw new IOException("Unsupported action in MetaServerShutdownHandler");
}
if (shouldAssignMeta) {
// TODO: May block here if hard time figuring state of meta.
verifyAndAssignMetaWithRetries(env);
if (this.shouldSplitWal && distributedLogReplay) {
int timeout = env.getMasterConfiguration().getInt(KEY_WAIT_ON_RIT, DEFAULT_WAIT_ON_RIT);
if (!waitOnRegionToClearRegionsInTransition(am, metaHRI, timeout)) {
processed = false;
} else {
// TODO: Matteo. We BLOCK here but most important thing to be doing at this moment.
mfs.splitMetaLog(serverName);
}
}
}
return processed;
}
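Examples 3, 5, 7 and 9 share the same shape for hbase:meta recovery: split the dead server's meta WAL, mark the meta region offline with regionOffline, then reassign it. The sketch below strips that down to the core step, assuming the non-replay code path; the class and method names are placeholders, error handling and retries are omitted, and the actual reassignment is done by the handlers' verifyAndAssignMetaWithRetries() helper, which is not reproduced here.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;

final class MetaRecoverySketch {
  /** Reduced sketch of the meta-recovery step: split meta logs, then offline meta. */
  static void recoverMeta(MasterServices services, ServerName deadServer) throws IOException {
    MasterFileSystem mfs = services.getMasterFileSystem();
    AssignmentManager am = services.getAssignmentManager();
    // Split the dead server's meta WAL so no edits are lost.
    mfs.splitMetaLog(deadServer);
    // Forget the old assignment before meta is handed to a new server; the real
    // handlers then reassign via verifyAndAssignMetaWithRetries() (not shown).
    am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
  }
}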
Example 4: testCleanUpDaughtersNotInMetaAfterFailedSplit
import org.apache.hadoop.hbase.master.AssignmentManager; // import the package/class the method depends on
@Test (timeout=180000)
public void testCleanUpDaughtersNotInMetaAfterFailedSplit() throws Exception {
TableName table = TableName.valueOf("testCleanUpDaughtersNotInMetaAfterFailedSplit");
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
try {
HTableDescriptor desc = new HTableDescriptor(table);
desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
createTable(TEST_UTIL, desc, null);
tbl = new HTable(cluster.getConfiguration(), desc.getTableName());
for (int i = 0; i < 5; i++) {
Put p1 = new Put(("r" + i).getBytes());
p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
tbl.put(p1);
}
admin.flush(desc.getTableName());
List<HRegion> regions = cluster.getRegions(desc.getTableName());
int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
HRegionServer regionServer = cluster.getRegionServer(serverWith);
cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
SplitTransactionImpl st = new SplitTransactionImpl(regions.get(0), Bytes.toBytes("r3"));
st.prepare();
st.stepsBeforePONR(regionServer, regionServer, false);
AssignmentManager am = cluster.getMaster().getAssignmentManager();
Map<String, RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
for (RegionState state : regionsInTransition.values()) {
am.regionOffline(state.getRegion());
}
ZKAssign.deleteNodeFailSilent(regionServer.getZooKeeper(), regions.get(0).getRegionInfo());
Map<HRegionInfo, ServerName> regionsMap = new HashMap<HRegionInfo, ServerName>();
regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
am.assign(regionsMap);
am.waitForAssignment(regions.get(0).getRegionInfo());
HBaseFsck hbck = doFsck(conf, false);
assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
// holes are separate from overlap groups
assertEquals(0, hbck.getOverlapGroups(table).size());
// fix hole
assertErrors(
doFsck(
conf, false, true, false, false, false, false, false, false, false, false, false, null),
new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
// check that hole fixed
assertNoErrors(doFsck(conf, false));
assertEquals(5, countRows());
} finally {
if (tbl != null) {
tbl.close();
tbl = null;
}
cleanupTable(table);
}
}
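The test above relies on a second recurring pattern: after the stuck regions are forced offline with regionOffline, the parent is explicitly reassigned through assign(Map) and waitForAssignment. A trimmed sketch of that recovery step follows; the wrapper class and method names are invented for illustration, and the AssignmentManager, region, and target server are assumed to come from a fixture like the one in the test.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.AssignmentManager;

final class ReassignAfterOfflineSketch {
  /** Offline a region, then push it back onto a chosen server, as the test above does. */
  static void offlineAndReassign(AssignmentManager am, HRegionInfo regionInfo, ServerName target)
      throws IOException, InterruptedException {
    // Drop the stale in-memory assignment left behind by the failed split.
    am.regionOffline(regionInfo);
    // Ask the AssignmentManager to place the region on the given server...
    Map<HRegionInfo, ServerName> plan = new HashMap<HRegionInfo, ServerName>();
    plan.put(regionInfo, target);
    am.assign(plan);
    // ...and block until the assignment actually completes.
    am.waitForAssignment(regionInfo);
  }
}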
Example 5: process
import org.apache.hadoop.hbase.master.AssignmentManager; // import the package/class the method depends on
@Override
public void process() throws IOException {
boolean gotException = true;
try {
AssignmentManager am = this.services.getAssignmentManager();
this.services.getMasterFileSystem().setLogRecoveryMode();
boolean distributedLogReplay =
(this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);
try {
if (this.shouldSplitWal) {
LOG.info("Splitting hbase:meta logs for " + serverName);
if (distributedLogReplay) {
Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
regions.add(HRegionInfo.FIRST_META_REGIONINFO);
this.services.getMasterFileSystem().prepareLogReplay(serverName, regions);
} else {
this.services.getMasterFileSystem().splitMetaLog(serverName);
}
am.getRegionStates().logSplit(HRegionInfo.FIRST_META_REGIONINFO);
}
} catch (IOException ioe) {
this.services.getExecutorService().submit(this);
this.deadServers.add(serverName);
throw new IOException("failed log splitting for " + serverName + ", will retry", ioe);
}
// Assign meta if we were carrying it.
// Check again: region may be assigned to other where because of RIT
// timeout
if (am.isCarryingMeta(serverName)) {
LOG.info("Server " + serverName + " was carrying META. Trying to assign.");
am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
verifyAndAssignMetaWithRetries();
} else if (!server.getMetaTableLocator().isLocationAvailable(this.server.getZooKeeper())) {
// the meta location as per master is null. This could happen in case when meta assignment
// in previous run failed, while meta znode has been updated to null. We should try to
// assign the meta again.
verifyAndAssignMetaWithRetries();
} else {
LOG.info("META has been assigned to otherwhere, skip assigning.");
}
try {
if (this.shouldSplitWal && distributedLogReplay) {
if (!am.waitOnRegionToClearRegionsInTransition(HRegionInfo.FIRST_META_REGIONINFO,
regionAssignmentWaitTimeout)) {
// Wait here is to avoid log replay hits current dead server and incur a RPC timeout
// when replay happens before region assignment completes.
LOG.warn("Region " + HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()
+ " didn't complete assignment in time");
}
this.services.getMasterFileSystem().splitMetaLog(serverName);
}
} catch (Exception ex) {
if (ex instanceof IOException) {
this.services.getExecutorService().submit(this);
this.deadServers.add(serverName);
throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
} else {
throw new IOException(ex);
}
}
gotException = false;
} finally {
if (gotException){
// If we had an exception, this.deadServers.finish will be skipped in super.process()
this.deadServers.finish(serverName);
}
}
super.process();
// Clear this counter on successful handling.
this.eventExceptionCount.set(0);
}
Example 6: testCleanUpDaughtersNotInMetaAfterFailedSplit
import org.apache.hadoop.hbase.master.AssignmentManager; // import the package/class the method depends on
@Test (timeout=180000)
public void testCleanUpDaughtersNotInMetaAfterFailedSplit() throws Exception {
TableName table = TableName.valueOf("testCleanUpDaughtersNotInMetaAfterFailedSplit");
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
try {
HTableDescriptor desc = new HTableDescriptor(table);
desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
admin.createTable(desc);
tbl = new HTable(cluster.getConfiguration(), desc.getTableName());
for (int i = 0; i < 5; i++) {
Put p1 = new Put(("r" + i).getBytes());
p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
tbl.put(p1);
}
admin.flush(desc.getTableName());
List<HRegion> regions = cluster.getRegions(desc.getTableName());
int serverWith = cluster.getServerWith(regions.get(0).getRegionName());
HRegionServer regionServer = cluster.getRegionServer(serverWith);
cluster.getServerWith(regions.get(0).getRegionName());
SplitTransaction st = new SplitTransaction(regions.get(0), Bytes.toBytes("r3"));
st.prepare();
st.stepsBeforePONR(regionServer, regionServer, false);
AssignmentManager am = cluster.getMaster().getAssignmentManager();
Map<String, RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
for (RegionState state : regionsInTransition.values()) {
am.regionOffline(state.getRegion());
}
ZKAssign.deleteNodeFailSilent(regionServer.getZooKeeper(), regions.get(0).getRegionInfo());
Map<HRegionInfo, ServerName> regionsMap = new HashMap<HRegionInfo, ServerName>();
regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
am.assign(regionsMap);
am.waitForAssignment(regions.get(0).getRegionInfo());
HBaseFsck hbck = doFsck(conf, false);
assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
// holes are separate from overlap groups
assertEquals(0, hbck.getOverlapGroups(table).size());
// fix hole
assertErrors(
doFsck(
conf, false, true, false, false, false, false, false, false, false, false, false, null),
new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
// check that hole fixed
assertNoErrors(doFsck(conf, false));
assertEquals(5, countRows());
} finally {
if (tbl != null) {
tbl.close();
tbl = null;
}
cleanupTable(table);
}
}
Example 7: process
import org.apache.hadoop.hbase.master.AssignmentManager; // import the package/class the method depends on
@Override
public void process() throws IOException {
boolean gotException = true;
try {
AssignmentManager am = this.services.getAssignmentManager();
try {
if (this.shouldSplitHlog) {
LOG.info("Splitting hbase:meta logs for " + serverName);
if (this.distributedLogReplay) {
Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
regions.add(HRegionInfo.FIRST_META_REGIONINFO);
this.services.getMasterFileSystem().prepareLogReplay(serverName, regions);
} else {
this.services.getMasterFileSystem().splitMetaLog(serverName);
}
am.getRegionStates().logSplit(HRegionInfo.FIRST_META_REGIONINFO);
}
} catch (IOException ioe) {
this.services.getExecutorService().submit(this);
this.deadServers.add(serverName);
throw new IOException("failed log splitting for " + serverName + ", will retry", ioe);
}
// Assign meta if we were carrying it.
// Check again: region may be assigned to other where because of RIT
// timeout
if (am.isCarryingMeta(serverName)) {
LOG.info("Server " + serverName + " was carrying META. Trying to assign.");
am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
verifyAndAssignMetaWithRetries();
} else if (!this.services.getCatalogTracker().isMetaLocationAvailable()) {
// the meta location as per master is null. This could happen in case when meta assignment
// in previous run failed, while meta znode has been updated to null. We should try to
// assign the meta again.
verifyAndAssignMetaWithRetries();
} else {
LOG.info("META has been assigned to otherwhere, skip assigning.");
}
try {
if (this.shouldSplitHlog && this.distributedLogReplay) {
if (!am.waitOnRegionToClearRegionsInTransition(HRegionInfo.FIRST_META_REGIONINFO,
regionAssignmentWaitTimeout)) {
// Wait here is to avoid log replay hits current dead server and incur a RPC timeout
// when replay happens before region assignment completes.
LOG.warn("Region " + HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()
+ " didn't complete assignment in time");
}
this.services.getMasterFileSystem().splitMetaLog(serverName);
}
} catch (Exception ex) {
if (ex instanceof IOException) {
this.services.getExecutorService().submit(this);
this.deadServers.add(serverName);
throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
} else {
throw new IOException(ex);
}
}
gotException = false;
} finally {
if (gotException){
// If we had an exception, this.deadServers.finish will be skipped in super.process()
this.deadServers.finish(serverName);
}
}
super.process();
// Clear this counter on successful handling.
this.eventExceptionCount.set(0);
}
Example 8: handleTableOperation
import org.apache.hadoop.hbase.master.AssignmentManager; // import the package/class the method depends on
@Override
protected void handleTableOperation(List<HRegionInfo> regions)
throws IOException, KeeperException {
MasterCoprocessorHost cpHost = ((HMaster) this.server)
.getCoprocessorHost();
if (cpHost != null) {
cpHost.preDeleteTableHandler(this.tableName);
}
// 1. Wait because of region in transition
AssignmentManager am = this.masterServices.getAssignmentManager();
RegionStates states = am.getRegionStates();
long waitTime = server.getConfiguration().
getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
for (HRegionInfo region : regions) {
long done = System.currentTimeMillis() + waitTime;
while (System.currentTimeMillis() < done) {
if (states.isRegionInState(region, State.FAILED_OPEN)) {
am.regionOffline(region);
}
if (!states.isRegionInTransition(region)) break;
Threads.sleep(waitingTimeForEvents);
LOG.debug("Waiting on region to clear regions in transition; "
+ am.getRegionStates().getRegionTransitionState(region));
}
if (states.isRegionInTransition(region)) {
throw new IOException("Waited hbase.master.wait.on.region (" +
waitTime + "ms) for region to leave region " +
region.getRegionNameAsString() + " in transitions");
}
}
// 2. Remove regions from META
LOG.debug("Deleting regions from META");
MetaEditor.deleteRegions(this.server.getCatalogTracker(), regions);
// 3. Move the table in /hbase/.tmp
MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
Path tempTableDir = mfs.moveTableToTemp(tableName);
try {
// 4. Delete regions from FS (temp directory)
FileSystem fs = mfs.getFileSystem();
for (HRegionInfo hri: regions) {
LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
tempTableDir, new Path(tempTableDir, hri.getEncodedName()));
}
// 5. Delete table from FS (temp directory)
if (!fs.delete(tempTableDir, true)) {
LOG.error("Couldn't delete " + tempTableDir);
}
LOG.debug("Table '" + tableName + "' archived!");
} finally {
// 6. Update table descriptor cache
LOG.debug("Removing '" + tableName + "' descriptor.");
this.masterServices.getTableDescriptors().remove(tableName);
// 7. Clean up regions of the table in RegionStates.
LOG.debug("Removing '" + tableName + "' from region states.");
states.tableDeleted(tableName);
// 8. If entry for this table in zk, and up in AssignmentManager, remove it.
LOG.debug("Marking '" + tableName + "' as deleted.");
am.getZKTable().setDeletedTable(tableName);
}
if (cpHost != null) {
cpHost.postDeleteTableHandler(this.tableName);
}
}
Example 9: process
import org.apache.hadoop.hbase.master.AssignmentManager; // import the package/class the method depends on
@Override
public void process() throws IOException {
boolean gotException = true;
try {
AssignmentManager am = this.services.getAssignmentManager();
try {
if (this.shouldSplitHlog) {
LOG.info("Splitting hbase:meta logs for " + serverName);
if (this.distributedLogReplay) {
Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
regions.add(HRegionInfo.FIRST_META_REGIONINFO);
this.services.getMasterFileSystem().prepareLogReplay(serverName, regions);
} else {
this.services.getMasterFileSystem().splitMetaLog(serverName);
}
am.getRegionStates().logSplit(HRegionInfo.FIRST_META_REGIONINFO);
}
} catch (IOException ioe) {
this.services.getExecutorService().submit(this);
this.deadServers.add(serverName);
throw new IOException("failed log splitting for " + serverName + ", will retry", ioe);
}
// Assign meta if we were carrying it.
// Check again: region may be assigned to other where because of RIT
// timeout
if (am.isCarryingMeta(serverName)) {
LOG.info("Server " + serverName + " was carrying META. Trying to assign.");
am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
verifyAndAssignMetaWithRetries();
} else if (!this.services.getCatalogTracker().isMetaLocationAvailable()) {
// the meta location as per master is null. This could happen in case when meta assignment
// in previous run failed, while meta znode has been updated to null. We should try to
// assign the meta again.
verifyAndAssignMetaWithRetries();
} else {
LOG.info("META has been assigned to otherwhere, skip assigning.");
}
try {
if (this.shouldSplitHlog && this.distributedLogReplay) {
if (!am.waitOnRegionToClearRegionsInTransition(HRegionInfo.FIRST_META_REGIONINFO,
regionAssignmentWaitTimeout)) {
// Wait here is to avoid log replay hits current dead server and incur a RPC timeout
// when replay happens before region assignment completes.
LOG.warn("Region " + HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()
+ " didn't complete assignment in time");
}
this.services.getMasterFileSystem().splitMetaLog(serverName);
}
} catch (Exception ex) {
if (ex instanceof IOException) {
this.services.getExecutorService().submit(this);
this.deadServers.add(serverName);
throw new IOException("failed log splitting for " + serverName + ", will retry", ex);
} else {
throw new IOException(ex);
}
}
gotException = false;
} finally {
if (gotException){
// If we had an exception, this.deadServers.finish will be skipped in super.process()
this.deadServers.finish(serverName);
}
}
super.process();
}
Example 10: handleTableOperation
import org.apache.hadoop.hbase.master.AssignmentManager; // import the package/class the method depends on
@Override
protected void handleTableOperation(List<HRegionInfo> regions)
throws IOException, KeeperException {
MasterCoprocessorHost cpHost = ((HMaster) this.server)
.getCoprocessorHost();
if (cpHost != null) {
cpHost.preDeleteTableHandler(this.tableName);
}
// 1. Wait because of region in transition
AssignmentManager am = this.masterServices.getAssignmentManager();
RegionStates states = am.getRegionStates();
long waitTime = server.getConfiguration().
getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
for (HRegionInfo region : regions) {
long done = System.currentTimeMillis() + waitTime;
while (System.currentTimeMillis() < done) {
if (states.isRegionInState(region, State.FAILED_OPEN)) {
am.regionOffline(region);
}
if (!states.isRegionInTransition(region)) break;
Threads.sleep(waitingTimeForEvents);
LOG.debug("Waiting on region to clear regions in transition; "
+ am.getRegionStates().getRegionTransitionState(region));
}
if (states.isRegionInTransition(region)) {
throw new IOException("Waited hbase.master.wait.on.region (" +
waitTime + "ms) for region to leave region " +
region.getRegionNameAsString() + " in transitions");
}
}
// 2. Remove regions from META
LOG.debug("Deleting regions from META");
MetaEditor.deleteRegions(this.server.getCatalogTracker(), regions);
// 3. Move the table in /hbase/.tmp
MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
Path tempTableDir = mfs.moveTableToTemp(tableName);
try {
// 4. Delete regions from FS (temp directory)
FileSystem fs = mfs.getFileSystem();
for (HRegionInfo hri: regions) {
LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
tempTableDir, new Path(tempTableDir, hri.getEncodedName()));
}
// 5. Delete table from FS (temp directory)
if (!fs.delete(tempTableDir, true)) {
LOG.error("Couldn't delete " + tempTableDir);
}
LOG.debug("Table '" + tableName + "' archived!");
} finally {
// 6. Update table descriptor cache
LOG.debug("Removing '" + tableName + "' descriptor.");
this.masterServices.getTableDescriptors().remove(tableName);
// 7. If entry for this table in zk, and up in AssignmentManager, remove it.
LOG.debug("Marking '" + tableName + "' as deleted.");
am.getZKTable().setDeletedTable(tableName);
}
if (cpHost != null) {
cpHost.postDeleteTableHandler(this.tableName);
}
}