This page collects typical usage examples of the Java class org.apache.hadoop.ha.HAServiceProtocol.RequestSource. If you have been wondering what exactly RequestSource does, how to use it, or simply want working examples, the curated code samples below should help.
The RequestSource class belongs to the org.apache.hadoop.ha.HAServiceProtocol package. Eight code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
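For orientation, RequestSource is a small enum nested in the HAServiceProtocol interface; its three values identify who initiated an HA state-change request. The sketch below reconstructs it from the values used across the examples on this page (see the switch in Example 4); the inline comments are explanatory additions, not upstream javadoc:

public interface HAServiceProtocol {
  enum RequestSource {
    REQUEST_BY_USER,         // a plain operator-issued request
    REQUEST_BY_USER_FORCED,  // an operator request forced past safety checks
    REQUEST_BY_ZKFC          // an automatic request from a ZKFailoverController
  }
  // ... monitoring and transition methods elided ...
}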
Example 1: doFence
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; // import the required package/class
private void doFence(HAServiceTarget target) {
  LOG.info("Should fence: " + target);
  boolean gracefulWorked = new FailoverController(conf,
      RequestSource.REQUEST_BY_ZKFC).tryGracefulFence(target);
  if (gracefulWorked) {
    // It's possible that it's in standby but just about to go into active,
    // no? Is there some race here?
    LOG.info("Successfully transitioned " + target + " to standby " +
        "state without fencing");
    return;
  }

  try {
    target.checkFencingConfigured();
  } catch (BadFencingConfigurationException e) {
    LOG.error("Couldn't fence old active " + target, e);
    recordActiveAttempt(new ActiveAttemptRecord(false, "Unable to fence old active"));
    throw new RuntimeException(e);
  }

  if (!target.getFencer().fence(target)) {
    throw new RuntimeException("Unable to fence " + target);
  }
}
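doFence tries a graceful fence first (an RPC asking the old active to step down to standby) and only falls back to the configured fencing methods if that fails. For target.checkFencingConfigured() to succeed on an HDFS deployment, a fencing method must be configured; a minimal sketch, assuming the standard HDFS configuration key:

Configuration conf = new Configuration();
// "shell(/bin/true)" is a test-only fencer that always succeeds;
// production setups typically use sshfence or a real shell script.
conf.set("dfs.ha.fencing.methods", "shell(/bin/true)");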
Example 2: checkManualStateManagementOK
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; // import the required package/class
/**
 * Ensure that we are allowed to manually manage the HA state of the target
 * service. If automatic failover is configured, then the automatic
 * failover controllers should be doing state management, and it is generally
 * an error to use the HAAdmin command line to do so.
 *
 * @param target the target to check
 * @return true if manual state management is allowed
 */
private boolean checkManualStateManagementOK(HAServiceTarget target) {
  if (target.isAutoFailoverEnabled()) {
    if (requestSource != RequestSource.REQUEST_BY_USER_FORCED) {
      errOut.println(
          "Automatic failover is enabled for " + target + "\n" +
          "Refusing to manually manage HA state, since it may cause\n" +
          "a split-brain scenario or other incorrect state.\n" +
          "If you are very sure you know what you are doing, please \n" +
          "specify the --" + FORCEMANUAL + " flag.");
      return false;
    } else {
      LOG.warn("Proceeding with manual HA state management even though\n" +
          "automatic failover is enabled for " + target);
      return true;
    }
  }
  return true;
}
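This check is what gives the --forcemanual flag its meaning: when automatic failover is enabled, only requests tagged REQUEST_BY_USER_FORCED are allowed through. A minimal sketch of how a caller might map that flag to a request source (forceManual and proxy are hypothetical stand-ins, not the actual HAAdmin internals):

// Choose the request source based on whether --forcemanual was supplied.
RequestSource source = forceManual
    ? RequestSource.REQUEST_BY_USER_FORCED
    : RequestSource.REQUEST_BY_USER;
StateChangeRequestInfo reqInfo = new StateChangeRequestInfo(source);
proxy.transitionToActive(reqInfo);  // proxy implements HAServiceProtocol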
Example 3: FailoverController
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; // import the required package/class
public FailoverController(Configuration conf,
    RequestSource source) {
  this.conf = conf;
  this.gracefulFenceConf = new Configuration(conf);
  this.requestSource = source;

  this.gracefulFenceTimeout = getGracefulFenceTimeout(conf);
  this.rpcTimeoutToNewActive = getRpcTimeoutToNewActive(conf);

  // Configure fewer retries for the graceful fence
  int gracefulFenceConnectRetries = conf.getInt(
      CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES,
      CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES_DEFAULT);
  gracefulFenceConf.setInt(
      CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      gracefulFenceConnectRetries);
  gracefulFenceConf.setInt(
      CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      gracefulFenceConnectRetries);
}
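Note that the constructor clones the configuration, so the reduced retry counts apply only to the graceful-fence RPC and not to the caller's other connections. Typical usage, matching the failover tests later on this page (target1 and target2 are assumed to be already-resolved HAServiceTarget instances):

FailoverController fc = new FailoverController(conf,
    RequestSource.REQUEST_BY_USER);
// failover(from, to, forceFence, forceActive)
fc.failover(target1, target2, false, false);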
Example 4: convert
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; // import the required package/class
private StateChangeRequestInfo convert(HAStateChangeRequestInfoProto proto) {
  RequestSource src;
  switch (proto.getReqSource()) {
  case REQUEST_BY_USER:
    src = RequestSource.REQUEST_BY_USER;
    break;
  case REQUEST_BY_USER_FORCED:
    src = RequestSource.REQUEST_BY_USER_FORCED;
    break;
  case REQUEST_BY_ZKFC:
    src = RequestSource.REQUEST_BY_ZKFC;
    break;
  default:
    LOG.warn("Unknown request source: " + proto.getReqSource());
    src = null;
  }
  return new StateChangeRequestInfo(src);
}
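A client-side translator needs the inverse mapping. A sketch of what that looks like, assuming the generated protobuf enum is named HARequestSource as in Hadoop's client-side translator (verify the name against your Hadoop version):

private HARequestSource convert(RequestSource src) {
  switch (src) {
  case REQUEST_BY_USER:
    return HARequestSource.REQUEST_BY_USER;
  case REQUEST_BY_USER_FORCED:
    return HARequestSource.REQUEST_BY_USER_FORCED;
  case REQUEST_BY_ZKFC:
    return HARequestSource.REQUEST_BY_ZKFC;
  default:
    throw new IllegalArgumentException("Unknown request source: " + src);
  }
}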
Example 5: assertCanStartHaNameNodes
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; // import the required package/class
private void assertCanStartHaNameNodes(String pathSuffix)
    throws ServiceFailedException, IOException, URISyntaxException,
    InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);

  // Make sure HA is working.
  cluster.getNameNode(0).getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(TEST_PATH, pathSuffix);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
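The transitionToActive call above shows the standard pattern for a manual HA state change over RPC: wrap a RequestSource in a StateChangeRequestInfo. Stepping a node back down is symmetric; a sketch using the same MiniDFSCluster handle as above:

// Ask NameNode 0 to return to standby, as a manual user request.
cluster.getNameNode(0).getRpcServer().transitionToStandby(
    new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));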
Example 6: testEnterSafeModeInANNShouldNotThrowNPE
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; // import the required package/class
/**
 * Test case for entering safe mode in the active namenode when it is
 * already in startup safe mode. This is a regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil
      .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));

  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'",
      status.startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
}
Example 7: testRedirect
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; // import the required package/class
@Test(timeout=60000)
public void testRedirect() throws Exception {
  // Both jobtracker addresses should serve up the jobtracker page,
  // regardless of state.
  checkJobTrackerPage("jt1");
  checkJobTrackerPage("jt2");

  // Fail over to jt2.
  FailoverController fc = new FailoverController(conf,
      RequestSource.REQUEST_BY_USER);
  fc.failover(target1, target2, false, false);
  cluster.waitActive();

  checkJobTrackerPage("jt1");
  checkJobTrackerPage("jt2");
}
Example 8: testClientFailover
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; // import the required package/class
@Test(timeout=60000)
public void testClientFailover() throws Exception {
  LOG.info("Running testClientFailover");
  startCluster();

  // Test with a client. c.f. HATestUtil.setFailoverConfigurations
  JobClient jc = new JobClient(conf);
  assertEquals("client sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());

  // Fail over to jt2.
  FailoverController fc = new FailoverController(conf,
      RequestSource.REQUEST_BY_USER);
  fc.failover(target1, target2, false, false);
  cluster.waitActive();
  assertEquals("jt2 running", JobTrackerStatus.RUNNING,
      jt2.getJobTracker().getClusterStatus().getJobTrackerStatus());
  assertNull("jt1 not running", jt1.getJobTracker());

  assertEquals("client still sees jt running", JobTrackerStatus.RUNNING,
      jc.getClusterStatus().getJobTrackerStatus());
}
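Both of these MapReduce HA tests drive the failover with REQUEST_BY_USER, i.e. they behave like an operator issuing a manual failover. Failing back is symmetric; a sketch reusing the controller above, assuming jt1 has been restarted and is serving as standby again:

// Fail back from jt2 to jt1.
fc.failover(target2, target1, false, false);
cluster.waitActive();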