本文整理汇总了Java中org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv类的典型用法代码示例。如果您正苦于以下问题:Java MasterProcedureEnv类的具体用法?Java MasterProcedureEnv怎么用?Java MasterProcedureEnv使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
MasterProcedureEnv类属于org.apache.hadoop.hbase.master.procedure包,在下文中一共展示了MasterProcedureEnv类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: startProcedureExecutor
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
/**
 * Wires up and starts the master's procedure subsystem: a WAL-backed
 * {@code WALProcedureStore} under the master procedure log directory, and a
 * {@code ProcedureExecutor} driving procedures against a fresh
 * {@code MasterProcedureEnv}.
 *
 * @throws IOException if the procedure store cannot be created on the filesystem
 */
private void startProcedureExecutor() throws IOException {
  final MasterProcedureEnv env = new MasterProcedureEnv(this);
  final Path procLogDir = new Path(fileSystemManager.getRootDir(),
      MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
  procedureStore = new WALProcedureStore(conf, fileSystemManager.getFileSystem(), procLogDir,
      new MasterProcedureEnv.WALStoreLeaseRecovery(this));
  // Let the store notify the master (e.g. to abort) on store-level events.
  procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
  procedureExecutor =
      new ProcedureExecutor(conf, env, procedureStore, env.getProcedureQueue());

  // Default worker count: all available cores, but never below the configured minimum.
  final int workerCount = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
      Math.max(Runtime.getRuntime().availableProcessors(),
          MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
  final boolean abortOnCorruption =
      conf.getBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
          MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
  // Start the store before the executor: the executor replays from the store on start.
  procedureStore.start(workerCount);
  procedureExecutor.start(workerCount, abortOnCorruption);
}
示例2: testAbortProcedure
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
@Test
public void testAbortProcedure() throws Exception {
  final TableName table = TableName.valueOf("testAbortProcedure");
  final ProcedureExecutor<MasterProcedureEnv> executor =
      TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();

  // Submit a dummy DDL procedure owned by USER_OWNER so ownership checks apply.
  Procedure ddlProc = new TestTableDDLProcedure(executor.getEnvironment(), table);
  ddlProc.setOwner(USER_OWNER.getShortName());
  final long pid = executor.submitProcedure(ddlProc);

  AccessTestAction abortAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      ACCESS_CONTROLLER
          .preAbortProcedure(ObserverContext.createAndPrepare(CP_ENV, null), executor, pid);
      return null;
    }
  };

  // Admins and the procedure owner may abort; everyone else is denied.
  verifyAllowed(abortAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN);
  verifyAllowed(abortAction, USER_OWNER);
  verifyDenied(abortAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
}
示例3: testGetProcedures
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
@Test
public void testGetProcedures() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec =
      TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();

  // Submit a dummy DDL procedure owned by USER_OWNER so there is something to list.
  Procedure proc = new TestTableDDLProcedure(procExec.getEnvironment(), tableName);
  proc.setOwner(USER_OWNER);
  procExec.submitProcedure(proc);
  // Fix: the previous version assigned procExec.getProcedures() to an unused
  // local (procList); the result was never inspected, so the call is dropped.

  AccessTestAction getProceduresAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      ACCESS_CONTROLLER
          .postGetProcedures(ObserverContextImpl.createAndPrepare(CP_ENV));
      return null;
    }
  };

  // Admins and the procedure owner may list procedures; others get a filtered (null) view.
  verifyAllowed(getProceduresAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN);
  verifyAllowed(getProceduresAction, USER_OWNER);
  verifyIfNull(
      getProceduresAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
}
示例4: execute
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
/**
 * Dispatches the refresh-peer operation to the target region server and
 * suspends this procedure until the remote call reports back. Returns
 * {@code null} (done) once the remote call has succeeded, or when the target
 * server cannot be reached at all (assumed dead).
 */
@Override
protected synchronized Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env)
    throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
  if (dispatched) {
    if (succ) {
      // Remote call completed successfully; this procedure is finished.
      return null;
    }
    // Previous dispatch failed; reset so we can re-dispatch below.
    dispatched = false;
  }

  final boolean added = env.getRemoteDispatcher().addOperationToNode(targetServer, this);
  if (!added) {
    LOG.info("Can not add remote operation for refreshing peer {} for {} to {}, " +
        "this usually because the server is already dead, " +
        "give up and mark the procedure as complete", peerId, type, targetServer);
    return null;
  }

  dispatched = true;
  // Park this procedure on an event until the remote call-back wakes it.
  event = new ProcedureEvent<>(this);
  event.suspendIfNotReady(this);
  throw new ProcedureSuspendedException();
}
示例5: executeFromState
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
/**
 * State machine for moving a region: first unassign it from the source
 * server, then assign it (to the planned destination if one was given,
 * otherwise letting the balancer pick a server).
 */
@Override
protected Flow executeFromState(final MasterProcedureEnv env, final MoveRegionState state)
    throws InterruptedException {
  if (LOG.isTraceEnabled()) {
    LOG.trace(this + " execute state=" + state);
  }
  switch (state) {
    case MOVE_REGION_UNASSIGN:
      // force=true: the unassign is part of a planned move.
      addChildProcedure(new UnassignProcedure(plan.getRegionInfo(), plan.getSource(),
          plan.getDestination(), true));
      setNextState(MoveRegionState.MOVE_REGION_ASSIGN);
      break;
    case MOVE_REGION_ASSIGN:
      final AssignProcedure assign;
      if (plan.getDestination() == null) {
        assign = new AssignProcedure(plan.getRegionInfo());
      } else {
        assign = new AssignProcedure(plan.getRegionInfo(), plan.getDestination());
      }
      addChildProcedure(assign);
      return Flow.NO_MORE_STATE;
    default:
      throw new UnsupportedOperationException("unhandled state=" + state);
  }
  return Flow.HAS_MORE_STATE;
}
示例6: testRollbackAndDoubleExecution
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
@Test
public void testRollbackAndDoubleExecution() throws Exception {
  final TableName table = TableName.valueOf("testRollbackAndDoubleExecution");
  final ProcedureExecutor<MasterProcedureEnv> executor = getMasterProcedureExecutor();

  List<RegionInfo> regions = createTable(table);
  ProcedureTestingUtility.waitNoProcedureRunning(executor);
  // Kill the executor before each store update so every step is replayed.
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(executor, true);

  final RegionInfo[] toMerge = new RegionInfo[] { regions.get(0), regions.get(1) };
  long pid = executor.submitProcedure(
      new MergeTableRegionsProcedure(executor.getEnvironment(), toMerge, true));

  // Failing before MERGE_TABLE_REGIONS_CREATE_MERGED_REGION we should trigger the rollback
  // NOTE: the 5 (number before MERGE_TABLE_REGIONS_CREATE_MERGED_REGION step) is
  // hardcoded, so you have to look at this test at least once when you add a new step.
  final int stepsBeforeRollback = 5;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(executor, pid,
      stepsBeforeRollback, true);
}
示例7: startProcedureExecutor
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
private void startProcedureExecutor() throws IOException {
final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
procedureStore = new WALProcedureStore(conf,
new MasterProcedureEnv.WALStoreLeaseRecovery(this));
procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
MasterProcedureScheduler procedureScheduler = procEnv.getProcedureScheduler();
procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
configurationManager.registerObserver(procEnv);
int cpus = Runtime.getRuntime().availableProcessors();
final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
Math.max((cpus > 0? cpus/4: 0),
MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
final boolean abortOnCorruption = conf.getBoolean(
MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
procedureStore.start(numThreads);
procedureExecutor.start(numThreads, abortOnCorruption);
procEnv.getRemoteDispatcher().start();
}
示例8: remoteCallFailed
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
/**
 * Callback invoked when the remote call against {@code serverName} fails.
 * Delegates to the state-aware overload; if that reports the failure as
 * handled, the procedure is woken and put back on the scheduler.
 */
@Override
public void remoteCallFailed(final MasterProcedureEnv env,
    final ServerName serverName, final IOException exception) {
  final RegionStateNode regionNode = getRegionState(env);
  String msg = exception.getMessage();
  if (msg == null) {
    // Some exceptions carry no message; fall back to the type name.
    msg = exception.getClass().getSimpleName();
  }
  LOG.warn("Remote call failed " + this + "; " + regionNode.toShortString() +
      "; exception=" + msg);
  if (remoteCallFailed(env, regionNode, exception)) {
    // NOTE: This call to wakeEvent puts this Procedure back on the scheduler.
    // Thereafter, another Worker can be in here so DO NOT MESS WITH STATE beyond
    // this method. Just get out of this current processing quickly.
    regionNode.getProcedureEvent().wake(env.getProcedureScheduler());
  }
  // else leave the procedure in suspended state; it is waiting on another call to this callback
}
示例9: addToRemoteDispatcher
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
/**
 * Hands this procedure to the remote dispatcher for {@code targetServer}.
 * <p>Be careful! On return, one of two things has happened: either the
 * dispatch succeeded and this procedure is suspended waiting on the remote
 * report, or it failed and the procedure has already been put back on the
 * scheduler for another worker. Either way the current worker must stop
 * touching this procedure immediately after calling this method.
 *
 * @return true if the call was dispatched; false if it failed (any setup done
 *     for the dispatch must then be rolled back by the caller).
 */
protected boolean addToRemoteDispatcher(final MasterProcedureEnv env,
    final ServerName targetServer) {
  assert targetServer == null || targetServer.equals(getRegionState(env).getRegionLocation()):
    "targetServer=" + targetServer + " getRegionLocation=" +
    getRegionState(env).getRegionLocation(); // TODO
  LOG.info("Dispatch " + this + "; " + getRegionState(env).toShortString());

  // Suspend first: mark the region's ProcedureEvent not-'ready' so this procedure
  // waits for the remote regionserver's state-change report.
  getRegionState(env).getProcedureEvent().suspend();

  // Tricky: addOperationToNode can fail, and then the 'suspend' above must be
  // undone (remoteCallFailed performs the wake) and the caller must undo its
  // own setup as well.
  final boolean dispatched = env.getRemoteDispatcher().addOperationToNode(targetServer, this);
  if (!dispatched) {
    remoteCallFailed(env, targetServer,
        new FailedRemoteDispatchException(this + " to " + targetServer));
  }
  return dispatched;
}
示例10: block
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
/**
 * Busy-waits (polling every 10ms) until the given procedure finishes or the
 * configured cluster-schema operation timeout elapses.
 * An ugly utility to be removed when refactor TableNamespaceManager.
 *
 * @throws TimeoutIOException if the procedure is still running at the deadline
 */
private static void block(final MasterServices services, final long procId)
    throws TimeoutIOException {
  final int timeoutMillis = services.getConfiguration().getInt(
      ClusterSchema.HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY,
      ClusterSchema.DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT);
  final long deadline = EnvironmentEdgeManager.currentTime() + timeoutMillis;
  final ProcedureExecutor<MasterProcedureEnv> executor =
      services.getMasterProcedureExecutor();
  while (EnvironmentEdgeManager.currentTime() < deadline) {
    if (executor.isFinished(procId)) {
      return;
    }
    // Sleep some
    Threads.sleep(10);
  }
  throw new TimeoutIOException("Procedure pid=" + procId + " is still running");
}
示例11: acquireLock
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
/**
 * Acquires the region lock for this procedure. Unless we are assigning meta
 * itself, first waits for failover cleanup and for meta to be available and
 * loaded; then waits on the region lock from the procedure scheduler.
 *
 * @return {@code LOCK_ACQUIRED} when the lock is held, otherwise
 *     {@code LOCK_EVENT_WAIT} (the scheduler will retry this procedure later).
 */
@Override
protected LockState acquireLock(final MasterProcedureEnv env) {
  // Unless we are assigning meta, wait for meta to be available and loaded.
  if (!isMeta() && (env.waitFailoverCleanup(this) ||
      env.getAssignmentManager().waitMetaInitialized(this, getRegionInfo()))) {
    return LockState.LOCK_EVENT_WAIT;
  }
  // TODO: Revisit this and move it to the executor
  if (env.getProcedureScheduler().waitRegion(this, getRegionInfo())) {
    try {
      LOG.debug(LockState.LOCK_EVENT_WAIT + " pid=" + getProcId() + " " +
          env.getProcedureScheduler().dumpLocks());
    } catch (IOException e) {
      // Fix: was an auto-generated e.printStackTrace() stub. The dump is purely
      // diagnostic, so a failure must not affect lock acquisition -- log it
      // (with the cause) instead of writing to stderr.
      LOG.warn("Failed to dump scheduler locks for pid=" + getProcId(), e);
    }
    return LockState.LOCK_EVENT_WAIT;
  }
  this.lock = true;
  return LockState.LOCK_ACQUIRED;
}
示例12: execute
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
@Override
protected Procedure<MasterProcedureEnv>[] execute(final MasterProcedureEnv env)
throws ProcedureSuspendedException {
// Local master locks don't store any state, so on recovery, simply finish this procedure
// immediately.
if (recoveredMasterLock) return null;
if (lockAcquireLatch != null) {
lockAcquireLatch.countDown();
}
if (unlock.get() || hasHeartbeatExpired()) {
locked.set(false);
LOG.debug((unlock.get()? "UNLOCKED " : "TIMED OUT ") + toString());
return null;
}
synchronized (event) {
event.suspend();
event.suspendIfNotReady(this);
setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
}
throw new ProcedureSuspendedException();
}
示例13: acquireLock
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
@Override
protected LockState acquireLock(final MasterProcedureEnv env) {
if (env.waitInitialized(this)) return LockState.LOCK_EVENT_WAIT;
if (env.getProcedureScheduler().waitRegions(this, getTableName(),
mergedRegion, regionsToMerge[0], regionsToMerge[1])) {
try {
LOG.debug(LockState.LOCK_EVENT_WAIT + " " + env.getProcedureScheduler().dumpLocks());
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return LockState.LOCK_EVENT_WAIT;
}
this.lock = true;
return LockState.LOCK_ACQUIRED;
}
示例14: preAbortProcedure
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
/**
 * Access check before a procedure abort: the procedure's owner may always
 * abort it; anyone else needs global ADMIN permission.
 *
 * @throws IOException if the caller lacks the required permission
 */
@Override
public void preAbortProcedure(
    ObserverContext<MasterCoprocessorEnvironment> ctx,
    final ProcedureExecutor<MasterProcedureEnv> procEnv,
    final long procId) throws IOException {
  if (procEnv.isProcedureOwner(procId, getActiveUser())) {
    // Owners may abort their own procedures without further checks.
    return;
  }
  // If the user is not the procedure owner, then we should further probe whether
  // he can abort the procedure.
  requirePermission("abortProcedure", Action.ADMIN);
}
示例15: preAbortProcedure
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; //导入依赖的package包/类
/**
 * Invokes {@code preAbortProcedure} on every registered master observer.
 * When no coprocessors are registered, a null operation is passed through to
 * {@code execOperation}, which short-circuits.
 *
 * @return the result of {@code execOperation} for the chain
 * @throws IOException if any observer rejects the abort
 */
public boolean preAbortProcedure(
    final ProcedureExecutor<MasterProcedureEnv> procEnv,
    final long procId) throws IOException {
  CoprocessorOperation operation = null;
  if (!coprocessors.isEmpty()) {
    operation = new CoprocessorOperation() {
      @Override
      public void call(MasterObserver observer,
          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
        observer.preAbortProcedure(ctx, procEnv, procId);
      }
    };
  }
  return execOperation(operation);
}