This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.ha.HAContext. If you are wondering what the HAContext class is for, how to use it, or where to find examples of it, the curated class examples below may help.
The HAContext class belongs to the org.apache.hadoop.hdfs.server.namenode.ha package. A total of 13 code examples of the HAContext class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
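For orientation before the examples, here is a condensed sketch of the HAContext contract as it is exercised below. It is an illustrative subset only: the method list is inferred from the calls made in the examples, not copied from the authoritative interface, which contains additional members (locking helpers, state setters, and so on).

import java.io.IOException;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.ipc.StandbyException;

// Illustrative subset of the HAContext contract, limited to the methods
// that the examples in this article call. Not the real interface.
interface HAContextSketch {
  HAState getState();                               // current HA state (Examples 7 and 9)
  void startActiveServices() throws IOException;    // called from enterState (Example 5)
  void stopActiveServices() throws IOException;     // called from exitState (Example 6)
  void prepareToStopStandbyServices() throws ServiceFailedException; // Example 13
  void checkOperation(OperationCategory op) throws StandbyException; // Example 12
}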
Example 1: startCommonServices
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
/**
 * Start services common to both active and standby states
 * @param haContext
 * @throws IOException
 */
void startCommonServices(Configuration conf, HAContext haContext) throws IOException {
  this.registerMBean(); // register the MBean for the FSNamesystemState
  writeLock();
  this.haContext = haContext;
  try {
    nnResourceChecker = new NameNodeResourceChecker(conf);
    checkAvailableResources();
    assert safeMode != null &&
        !safeMode.isPopulatingReplQueues();
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.SAFEMODE);
    prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
        getCompleteBlocksTotal());
    setBlockTotal();
    blockManager.activate(conf);
  } finally {
    writeUnlock();
  }
  registerMXBean();
  DefaultMetricsSystem.instance().register(this);
}
Example 2: startCommonServices
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
/**
 * Start services common to both active and standby states
 */
void startCommonServices(Configuration conf, HAContext haContext) throws IOException {
  this.registerMBean(); // register the MBean for the FSNamesystemState
  writeLock();
  this.haContext = haContext;
  try {
    nnResourceChecker = new NameNodeResourceChecker(conf);
    checkAvailableResources();
    assert safeMode != null && !isPopulatingReplQueues();
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.SAFEMODE);
    prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
        getCompleteBlocksTotal());
    setBlockTotal();
    blockManager.activate(conf);
  } finally {
    writeUnlock();
  }
  registerMXBean();
  DefaultMetricsSystem.instance().register(this);
  snapshotManager.registerMXBean();
}
Example 3: startCommonServices
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
/**
 * Start services common to both active and standby states
 * @param haContext
 * @throws IOException
 */
void startCommonServices(Configuration conf, HAContext haContext) throws IOException {
  this.registerMBean(); // register the MBean for the FSNamesystemState
  writeLock();
  this.haContext = haContext;
  try {
    nnResourceChecker = new NameNodeResourceChecker(conf);
    checkAvailableResources();
    assert safeMode != null && !isPopulatingReplQueues();
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.SAFEMODE);
    prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
        getCompleteBlocksTotal());
    setBlockTotal();
    blockManager.activate(conf);
  } finally {
    writeUnlock();
  }
  registerMXBean();
  DefaultMetricsSystem.instance().register(this);
}
Example 4: startCommonServices
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
/**
 * Start services common to both active and standby states
 */
void startCommonServices(Configuration conf, HAContext haContext) throws IOException {
  this.registerMBean(); // register the MBean for the FSNamesystemState
  writeLock();
  this.haContext = haContext;
  try {
    nnResourceChecker = new NameNodeResourceChecker(conf);
    checkAvailableResources();
    assert safeMode != null && !isPopulatingReplQueues();
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.SAFEMODE);
    prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
        getCompleteBlocksTotal());
    setBlockTotal();
    blockManager.activate(conf);
  } finally {
    writeUnlock();
  }
  registerMXBean();
  DefaultMetricsSystem.instance().register(this);
  if (inodeAttributeProvider != null) {
    inodeAttributeProvider.start();
    dir.setINodeAttributeProvider(inodeAttributeProvider);
  }
  snapshotManager.registerMXBean();
}
Example 5: enterState
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
@Override // HAState
public void enterState(HAContext context) throws ServiceFailedException {
  try {
    context.startActiveServices();
  } catch (IOException e) {
    throw new ServiceFailedException("Failed to start backup services", e);
  }
}
Example 6: exitState
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
@Override // HAState
public void exitState(HAContext context) throws ServiceFailedException {
  try {
    context.stopActiveServices();
  } catch (IOException e) {
    throw new ServiceFailedException("Failed to stop backup services", e);
  }
}
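Examples 5 and 6 are enterState/exitState hooks from an HAState subclass; HAContext is the callback surface they use to actually start and stop NameNode services. The snippet below is a minimal, hypothetical sketch of how a caller could chain these hooks to move between states. It is not the actual Hadoop transition code, which additionally updates the context's current state and holds the context write lock around the switch.

// Hypothetical transition helper (illustrative only): pairs the
// exitState/enterState hooks shown in Examples 5 and 6.
static void transition(HAContext context, HAState from, HAState to)
    throws ServiceFailedException {
  from.prepareToExitState(context); // let the old state flush/clean up (see Example 13)
  from.exitState(context);          // stop the services owned by the old state
  to.enterState(context);           // start the services required by the new state
}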
Example 7: testReplQueuesActiveAfterStartupSafemode
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
@Test
public void testReplQueuesActiveAfterStartupSafemode() throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
  FSImage fsImage = Mockito.mock(FSImage.class);
  Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
  FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
  FSNamesystem fsn = Mockito.spy(fsNamesystem);
  // Make shouldPopulateReplQueues return true
  HAContext haContext = Mockito.mock(HAContext.class);
  HAState haState = Mockito.mock(HAState.class);
  Mockito.when(haContext.getState()).thenReturn(haState);
  Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
  Whitebox.setInternalState(fsn, "haContext", haContext);
  // Make NameNode.getNameNodeMetrics() not return null
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  fsn.enterSafeMode(false);
  assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
  assertTrue("Replication queues were being populated during very first "
      + "safemode", !fsn.isPopulatingReplQueues());
  fsn.leaveSafeMode();
  assertTrue("FSNamesystem didn't leave safemode", !fsn.isInSafeMode());
  assertTrue("Replication queues weren't being populated even after leaving "
      + "safemode", fsn.isPopulatingReplQueues());
  fsn.enterSafeMode(false);
  assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
  assertTrue("Replication queues weren't being populated after entering "
      + "safemode 2nd time", fsn.isPopulatingReplQueues());
}
Example 8: startCommonServices
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
/**
 * Start services common to both active and standby states
 */
void startCommonServices(Configuration conf, HAContext haContext) throws IOException {
  this.registerMBean(); // register the MBean for the FSNamesystemState
  writeLock();
  this.haContext = haContext;
  try {
    nnResourceChecker = new NameNodeResourceChecker(conf);
    checkAvailableResources();
    assert !blockManager.isPopulatingReplQueues();
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.SAFEMODE);
    long completeBlocksTotal = getCompleteBlocksTotal();
    prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
        completeBlocksTotal);
    blockManager.activate(conf, completeBlocksTotal);
  } finally {
    writeUnlock();
  }
  registerMXBean();
  DefaultMetricsSystem.instance().register(this);
  if (inodeAttributeProvider != null) {
    inodeAttributeProvider.start();
    dir.setINodeAttributeProvider(inodeAttributeProvider);
  }
  snapshotManager.registerMXBean();
}
Example 9: testReplQueuesActiveAfterStartupSafemode
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
@Test
public void testReplQueuesActiveAfterStartupSafemode() throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
  FSImage fsImage = Mockito.mock(FSImage.class);
  Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
  FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
  FSNamesystem fsn = Mockito.spy(fsNamesystem);
  BlockManager bm = fsn.getBlockManager();
  Whitebox.setInternalState(bm, "namesystem", fsn);
  // Make shouldPopulateReplQueues return true
  HAContext haContext = Mockito.mock(HAContext.class);
  HAState haState = Mockito.mock(HAState.class);
  Mockito.when(haContext.getState()).thenReturn(haState);
  Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
  Mockito.when(fsn.getHAContext()).thenReturn(haContext);
  // Make NameNode.getNameNodeMetrics() not return null
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  fsn.enterSafeMode(false);
  assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
  assertTrue("Replication queues were being populated during very first "
      + "safemode", !bm.isPopulatingReplQueues());
  fsn.leaveSafeMode();
  assertTrue("FSNamesystem didn't leave safemode", !fsn.isInSafeMode());
  assertTrue("Replication queues weren't being populated even after leaving "
      + "safemode", bm.isPopulatingReplQueues());
  fsn.enterSafeMode(false);
  assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
  assertTrue("Replication queues weren't being populated after entering "
      + "safemode 2nd time", bm.isPopulatingReplQueues());
}
Example 10: startCommonServices
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
/**
 * Start services common to both active and standby states
 */
void startCommonServices(Configuration conf, HAContext haContext) throws IOException {
  this.registerMBean(); // register the MBean for the FSNamesystemState
  writeLock();
  this.haContext = haContext;
  try {
    nnResourceChecker = new NameNodeResourceChecker(conf);
    checkAvailableResources();
    assert safeMode != null && !isPopulatingReplQueues();
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.SAFEMODE);
    prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
        getCompleteBlocksTotal());
    setBlockTotal();
    blockManager.activate(conf);
  } finally {
    writeUnlock();
  }
  registerMXBean();
  DefaultMetricsSystem.instance().register(this);
  snapshotManager.registerMXBean();
  authzProvider = ReflectionUtils.newInstance(conf.getClass(
      DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
      DefaultAuthorizationProvider.class,
      AuthorizationProvider.class), conf);
  authzProvider.start();
  AuthorizationProvider.set(authzProvider);
  snapshotManager.initAuthorizationProvider();
}
Example 11: createHAContext
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
protected HAContext createHAContext() {
  return new NameNodeHAContext();
}
Example 12: checkOperation
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
@Override // HAState
public void checkOperation(HAContext context, OperationCategory op)
    throws StandbyException {
  context.checkOperation(op);
}
Example 13: prepareToExitState
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; // import the required package/class
@Override // HAState
public void prepareToExitState(HAContext context) throws ServiceFailedException {
  context.prepareToStopStandbyServices();
}