This article collects typical usage examples of the Java class org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo. If you are wondering what StateChangeRequestInfo is for and how to use it, the curated examples below should help.
The StateChangeRequestInfo class belongs to the org.apache.hadoop.ha.HAServiceProtocol package. The 12 code examples that follow show the class in use, ordered by popularity by default.
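Before the individual examples, here is a minimal self-contained sketch of the class in use: a StateChangeRequestInfo simply wraps a RequestSource and is passed to the HAServiceProtocol state-change RPCs. The demo class name and the way the HAServiceProtocol proxy is obtained are assumptions for illustration, not taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;

public class StateChangeRequestInfoDemo {
  // svc is any HAServiceProtocol proxy (for example, a NameNode's RPC server);
  // how it is obtained is environment-specific and left out of this sketch.
  static void requestManualTransition(HAServiceProtocol svc) throws IOException {
    // Tag the request with its source so the target can apply its HA policy.
    StateChangeRequestInfo reqInfo =
        new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER);
    System.out.println("Requesting transition, source = " + reqInfo.getSource());
    svc.transitionToActive(reqInfo);
  }
}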
Example 1: convert
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
private StateChangeRequestInfo convert(HAStateChangeRequestInfoProto proto) {
  RequestSource src;
  switch (proto.getReqSource()) {
  case REQUEST_BY_USER:
    src = RequestSource.REQUEST_BY_USER;
    break;
  case REQUEST_BY_USER_FORCED:
    src = RequestSource.REQUEST_BY_USER_FORCED;
    break;
  case REQUEST_BY_ZKFC:
    src = RequestSource.REQUEST_BY_ZKFC;
    break;
  default:
    LOG.warn("Unknown request source: " + proto.getReqSource());
    src = null;
  }
  return new StateChangeRequestInfo(src);
}
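Example 1 converts the protobuf request into the Java-side StateChangeRequestInfo. For context, a simplified sketch of how such a converted value is typically consumed follows; the method signature and the server delegate are illustrative assumptions (the real Hadoop PB translator methods take protobuf request/response wrappers). Only convert() and transitionToActive(reqInfo) appear in the examples on this page.

// Illustrative sketch only: hand the converted request straight to the
// underlying HAServiceProtocol implementation.
public void transitionToActive(HAStateChangeRequestInfoProto proto)
    throws IOException {
  StateChangeRequestInfo reqInfo = convert(proto); // proto -> Java object
  server.transitionToActive(reqInfo);              // delegate to the real service
}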
Example 2: assertCanStartHaNameNodes
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
private void assertCanStartHaNameNodes(String pathSuffix)
    throws ServiceFailedException, IOException, URISyntaxException,
    InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);
  // Make sure HA is working.
  cluster.getNameNode(0).getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(TEST_PATH, pathSuffix);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
Example 3: testEnterSafeModeInANNShouldNotThrowNPE
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
/**
 * Test case for entering safe mode on the active NameNode when it is already
 * in startup safe mode. It is a regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil
      .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'",
      status.startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active",
      namesystem.isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active",
      namesystem.isInSafeMode());
}
Example 4: testCallbackSynchronizationTimingActive
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
/**
 * Helper method to test that neutral mode does not race with an active
 * transition.
 *
 * @param as the admin service
 * @param ees the embedded elector service
 * @throws IOException if there's an issue transitioning
 * @throws InterruptedException if interrupted
 */
private void testCallbackSynchronizationTimingActive(AdminService as,
    ActiveStandbyElectorBasedElectorService ees)
    throws IOException, InterruptedException {
  synchronized (ees.zkDisconnectLock) {
    // Sleep while holding the lock so that the timer thread can't do
    // anything when it runs. Sleep until we're pretty sure the timer thread
    // has tried to run.
    Thread.sleep(100);
    // While still holding the lock, cancel the timer by transitioning. This
    // simulates a race where the callback goes to cancel the timer while the
    // timer is trying to run.
    ees.becomeActive();
  }
  // Sleep just a little more so that the timer thread can do whatever it's
  // going to do, hopefully nothing.
  Thread.sleep(50);
  verify(as).transitionToActive((StateChangeRequestInfo) any());
  verify(as, never()).transitionToStandby((StateChangeRequestInfo) any());
}
Example 5: testCallbackSynchronizationTimingStandby
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
/**
 * Helper method to test that neutral mode does not race with a standby
 * transition.
 *
 * @param as the admin service
 * @param ees the embedded elector service
 * @throws IOException if there's an issue transitioning
 * @throws InterruptedException if interrupted
 */
private void testCallbackSynchronizationTimingStandby(AdminService as,
    ActiveStandbyElectorBasedElectorService ees)
    throws IOException, InterruptedException {
  synchronized (ees.zkDisconnectLock) {
    // Sleep while holding the lock so that the timer thread can't do
    // anything when it runs. Sleep until we're pretty sure the timer thread
    // has tried to run.
    Thread.sleep(100);
    // While still holding the lock, cancel the timer by transitioning. This
    // simulates a race where the callback goes to cancel the timer while the
    // timer is trying to run.
    ees.becomeStandby();
  }
  // Sleep just a little more so that the timer thread can do whatever it's
  // going to do, hopefully nothing.
  Thread.sleep(50);
  verify(as, atLeast(1)).transitionToStandby((StateChangeRequestInfo) any());
  verify(as, atMost(1)).transitionToStandby((StateChangeRequestInfo) any());
}
Example 6: monitorHealth
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
public static void monitorHealth(HAServiceProtocol svc,
    StateChangeRequestInfo reqInfo)
    throws IOException {
  try {
    svc.monitorHealth();
  } catch (RemoteException e) {
    // Unwrap the RemoteException so callers see the declared exception type.
    throw e.unwrapRemoteException(HealthCheckFailedException.class);
  }
}
Example 7: transitionToActive
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
public static void transitionToActive(HAServiceProtocol svc,
    StateChangeRequestInfo reqInfo)
    throws IOException {
  try {
    svc.transitionToActive(reqInfo);
  } catch (RemoteException e) {
    // Unwrap the RemoteException so callers see the declared exception type.
    throw e.unwrapRemoteException(ServiceFailedException.class);
  }
}
Example 8: transitionToStandby
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
public static void transitionToStandby(HAServiceProtocol svc,
    StateChangeRequestInfo reqInfo)
    throws IOException {
  try {
    svc.transitionToStandby(reqInfo);
  } catch (RemoteException e) {
    // Unwrap the RemoteException so callers see the declared exception type.
    throw e.unwrapRemoteException(ServiceFailedException.class);
  }
}
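Examples 6 to 8 follow one pattern: invoke the HAServiceProtocol RPC and unwrap the RemoteException so callers catch the declared exception type (HealthCheckFailedException or ServiceFailedException) directly. A hedged sketch of driving these wrappers for a manual failover follows; the Helper class name and the fromNode/toNode proxies are assumptions for illustration (in Hadoop these static methods live in HAServiceProtocolHelper).

// Hypothetical driver code; fromNode and toNode are HAServiceProtocol proxies
// obtained elsewhere, and Helper stands in for the class holding Examples 6-8.
StateChangeRequestInfo reqInfo =
    new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER);
Helper.monitorHealth(toNode, reqInfo);          // confirm the target is healthy
Helper.transitionToStandby(fromNode, reqInfo);  // demote the current active first
Helper.transitionToActive(toNode, reqInfo);     // then promote the new active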
Example 9: testBecomingActiveFails
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
/**
 * Test that the ZKFC successfully quits the election when it fails to
 * become active. This allows the old node to successfully fail back.
 */
@Test
public void testBecomingActiveFails() throws Exception {
  cluster.start();
  DummyHAService svc1 = cluster.getService(1);
  LOG.info("Making svc1 fail to become active");
  cluster.setFailToBecomeActive(1, true);
  LOG.info("Faking svc0 unhealthy, should NOT successfully " +
      "failover to svc1");
  cluster.setHealthy(0, false);
  cluster.waitForHealthState(0, State.SERVICE_UNHEALTHY);
  cluster.waitForActiveLockHolder(null);
  Mockito.verify(svc1.proxy, Mockito.timeout(2000).atLeastOnce())
      .transitionToActive(Mockito.<StateChangeRequestInfo>any());
  cluster.waitForHAState(0, HAServiceState.INITIALIZING);
  cluster.waitForHAState(1, HAServiceState.STANDBY);
  LOG.info("Faking svc0 healthy again, should go back to svc0");
  cluster.setHealthy(0, true);
  cluster.waitForHAState(0, HAServiceState.ACTIVE);
  cluster.waitForHAState(1, HAServiceState.STANDBY);
  cluster.waitForActiveLockHolder(0);
  // Ensure that we can fail back to svc1 once it is able
  // to become active (e.g. the admin has restarted it).
  LOG.info("Allowing svc1 to become active, expiring svc0");
  svc1.failToBecomeActive = false;
  cluster.expireAndVerifyFailover(0, 1);
}
Example 10: checkHaStateChange
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
/**
 * Check that a request to change this node's HA state is valid.
 * In particular, verifies that, if auto failover is enabled, non-forced
 * requests from the HAAdmin CLI are rejected, and vice versa.
 *
 * @param req the request to check
 * @throws AccessControlException if the request is disallowed
 */
void checkHaStateChange(StateChangeRequestInfo req)
    throws AccessControlException {
  boolean autoHaEnabled = conf.getBoolean(DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
      DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
  switch (req.getSource()) {
  case REQUEST_BY_USER:
    if (autoHaEnabled) {
      throw new AccessControlException(
          "Manual HA control for this NameNode is disallowed, because " +
          "automatic HA is enabled.");
    }
    break;
  case REQUEST_BY_USER_FORCED:
    if (autoHaEnabled) {
      LOG.warn("Allowing manual HA control from " +
          Server.getRemoteAddress() +
          " even though automatic HA is enabled, because the user " +
          "specified the force flag");
    }
    break;
  case REQUEST_BY_ZKFC:
    if (!autoHaEnabled) {
      throw new AccessControlException(
          "Request from ZK failover controller at " +
          Server.getRemoteAddress() + " denied since automatic HA " +
          "is not enabled");
    }
    break;
  }
}
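The policy in Example 10 boils down to a small decision table keyed on the request source and on dfs.ha.automatic-failover.enabled. The sketch below restates that logic as a standalone method for illustration; it is not the NameNode code itself, just a compact summary of the rules above.

// Simplified restatement of the Example 10 policy, for illustration only.
static boolean isAllowed(RequestSource source, boolean autoHaEnabled) {
  switch (source) {
  case REQUEST_BY_USER:
    return !autoHaEnabled;  // manual control only when auto HA is off
  case REQUEST_BY_USER_FORCED:
    return true;            // forced manual control is always allowed (with a warning)
  case REQUEST_BY_ZKFC:
    return autoHaEnabled;   // ZKFC requests only when auto HA is on
  default:
    return false;
  }
}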
Example 11: testMutativeOperationsWithAutoHaEnabled
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
/**
 * Test that, if automatic HA is enabled, none of the mutative operations
 * will succeed unless the -forcemanual flag is specified.
 * @throws Exception
 */
@Test
public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  // Turn on auto-HA in the config
  HdfsConfiguration conf = getHAConf();
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  // Should fail without the forcemanual flag
  assertEquals(-1, runTool("-transitionToActive", "nn1"));
  assertTrue(errOutput.contains("Refusing to manually manage"));
  assertEquals(-1, runTool("-transitionToStandby", "nn1"));
  assertTrue(errOutput.contains("Refusing to manually manage"));
  Mockito.verify(mockProtocol, Mockito.never())
      .transitionToActive(anyReqInfo());
  Mockito.verify(mockProtocol, Mockito.never())
      .transitionToStandby(anyReqInfo());
  // Force flag should bypass the check and change the request source
  // for the RPC
  setupConfirmationOnSystemIn();
  assertEquals(0, runTool("-transitionToActive", "-forcemanual", "nn1"));
  setupConfirmationOnSystemIn();
  assertEquals(0, runTool("-transitionToStandby", "-forcemanual", "nn1"));
  Mockito.verify(mockProtocol, Mockito.times(1)).transitionToActive(
      reqInfoCaptor.capture());
  Mockito.verify(mockProtocol, Mockito.times(1)).transitionToStandby(
      reqInfoCaptor.capture());
  // All of the RPCs should have had the "force" source
  for (StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
    assertEquals(RequestSource.REQUEST_BY_USER_FORCED, ri.getSource());
  }
}
Example 12: assertCanStartHANameNodes
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; // import the dependent package/class
private void assertCanStartHANameNodes(MiniDFSCluster cluster,
    Configuration conf, String path) throws ServiceFailedException,
    IOException, URISyntaxException, InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);
  // Make sure HA is working.
  cluster
      .getNameNode(0)
      .getRpcServer()
      .transitionToActive(
          new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(path);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}