This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode. If you are wondering what SecondaryNameNode is for, or how to use it in your own code, the curated examples below may help.
The SecondaryNameNode class belongs to the org.apache.hadoop.hdfs.server.namenode package. Six code examples are shown below, sorted by popularity by default.
Example 1: testSecondaryNameNodeDoesNotStart
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode; // import the required package/class
/**
 * Test that the 2NN does not start if given a config with HA NNs.
 */
@Test
public void testSecondaryNameNodeDoesNotStart() throws IOException {
  // Note we're not explicitly setting the nameservice Id in the
  // config as it is not required to be set and we want to test
  // that we can determine if HA is enabled when the nameservice Id
  // is not explicitly defined.
  Configuration conf = getHAConf("ns1", "1.2.3.1", "1.2.3.2");
  try {
    new SecondaryNameNode(conf);
    fail("Created a 2NN with an HA config");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "Cannot use SecondaryNameNode in an HA cluster", ioe);
  }
}
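The getHAConf helper is defined elsewhere in the test class and is not part of this excerpt. A minimal sketch of what it plausibly does, assuming the standard HDFS HA configuration keys (the NameNode IDs "nn1"/"nn2" and port 12345 are illustrative assumptions):

// Hypothetical reconstruction of the getHAConf helper used above: builds
// a Configuration describing one nameservice with two NameNodes, without
// setting the nameservice id explicitly (see the comment in the test).
private static Configuration getHAConf(String nsId, String host1, String host2) {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsId);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, nsId), "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, "nn1"),
      host1 + ":12345");
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, "nn2"),
      host2 + ":12345");
  return conf;
}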
Example 2: testSecondaryNodePorts
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode; // import the required package/class
/**
 * Verify secondary name-node port usage.
 */
public void testSecondaryNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();

    // bind http server to the same port as name-node
    Configuration conf2 = new Configuration(config);
    conf2.set("dfs.secondary.http.address",
        config.get("dfs.http.address"));
    SecondaryNameNode.LOG.info("= Starting 1 on: " +
        conf2.get("dfs.secondary.http.address"));
    boolean started = canStartSecondaryNode(conf2);
    assertFalse(started); // should fail

    // bind http server to a different port
    conf2.set("dfs.secondary.http.address", NAME_NODE_HTTP_HOST + "0");
    SecondaryNameNode.LOG.info("= Starting 2 on: " +
        conf2.get("dfs.secondary.http.address"));
    started = canStartSecondaryNode(conf2);
    assertTrue(started); // should start now
  } finally {
    stopNameNode(nn);
  }
}
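The startNameNode, stopNameNode, and canStartSecondaryNode helpers are also defined elsewhere in the test class. A plausible sketch of canStartSecondaryNode, assuming it reports a port clash by catching the BindException thrown from the 2NN constructor:

// Hypothetical reconstruction: returns false if the SecondaryNameNode
// cannot bind its HTTP port, true if it starts (it is then shut down).
private boolean canStartSecondaryNode(Configuration conf) throws IOException {
  SecondaryNameNode sn = null;
  try {
    sn = new SecondaryNameNode(conf);
  } catch (java.net.BindException e) {
    return false; // port already in use, e.g. by the NameNode's HTTP server
  }
  sn.shutdown();
  return true;
}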
Example 3: main
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode; // import the required package/class
public static void main(String... args) throws Exception {
  // Launch the SecondaryNameNode daemon asynchronously.
  new AosProcessLauncher() {
    @Override
    public void process() throws Exception {
      SecondaryNameNode.main(new String[] {});
    }
  }.launch();

  // Keep the launching JVM alive while the daemon runs.
  while (true) {
    LOGGER.info("Sleeping...");
    TimeUnit.MINUTES.sleep(1);
  }
}
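AosProcessLauncher is not a Hadoop class; it belongs to the surrounding project and its source is not shown here. Judging from its use, it runs process() asynchronously, so a minimal stand-in could be as simple as a thread wrapper (this is an assumption, not the real implementation):

// Assumed stand-in for AosProcessLauncher: runs process() on its own thread.
public abstract class AosProcessLauncher {
  public abstract void process() throws Exception;

  public void launch() {
    new Thread(() -> {
      try {
        process();
      } catch (Exception e) {
        throw new RuntimeException(e); // surface daemon startup failures
      }
    }).start();
  }
}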
Example 4: isValidRequestor
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode; // import the required package/class
protected boolean isValidRequestor(HttpServletRequest request, Configuration conf)
    throws IOException {
  String remotePrincipal = request.getUserPrincipal().getName();
  String remoteShortName = request.getRemoteUser();
  if (remotePrincipal == null) { // This really shouldn't happen...
    LOG.warn("Received null remoteUser while authorizing access to " +
        "GetJournalEditServlet");
    return false;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("Validating request made by " + remotePrincipal +
        " / " + remoteShortName + ". This user is: " +
        UserGroupInformation.getLoginUser());
  }

  Set<String> validRequestors = new HashSet<String>();
  validRequestors.addAll(DFSUtil.getAllNnPrincipals(conf));
  try {
    validRequestors.add(
        SecurityUtil.getServerPrincipal(conf
            .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
            SecondaryNameNode.getHttpAddress(conf).getHostName()));
  } catch (Exception e) {
    // Don't halt if SecondaryNameNode principal could not be added.
    LOG.debug("SecondaryNameNode principal could not be added", e);
    String msg = String.format(
        "SecondaryNameNode principal not considered, %s = %s, %s = %s",
        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
        conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
            DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
    LOG.warn(msg);
  }

  // Check the full principal name of all the configured valid requestors.
  for (String v : validRequestors) {
    if (LOG.isDebugEnabled())
      LOG.debug("isValidRequestor is comparing to valid requestor: " + v);
    if (v != null && v.equals(remotePrincipal)) {
      if (LOG.isDebugEnabled())
        LOG.debug("isValidRequestor is allowing: " + remotePrincipal);
      return true;
    }
  }

  // Additionally, we compare the short name of the requestor to this JN's
  // username, because we want to allow requests from other JNs during
  // recovery, but we can't enumerate the full list of JNs.
  if (remoteShortName.equals(
      UserGroupInformation.getLoginUser().getShortUserName())) {
    if (LOG.isDebugEnabled())
      LOG.debug("isValidRequestor is allowing other JN principal: " +
          remotePrincipal);
    return true;
  }

  if (LOG.isDebugEnabled())
    LOG.debug("isValidRequestor is rejecting: " + remotePrincipal);
  return false;
}
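The central call above is SecurityUtil.getServerPrincipal, which replaces the _HOST token in the configured Kerberos principal with the SecondaryNameNode's HTTP hostname. A minimal illustration (the principal and hostname below are made-up values, not from the source):

// Illustrative values only: realm and hostname are hypothetical.
String principal = SecurityUtil.getServerPrincipal(
    "hdfs/_HOST@EXAMPLE.COM",  // e.g. the *.kerberos.principal config value
    "snn01.example.com");      // host of SecondaryNameNode.getHttpAddress(conf)
// principal is now "hdfs/snn01.example.com@EXAMPLE.COM"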
Example 5: isValidRequestor
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode; // import the required package/class
protected boolean isValidRequestor(HttpServletRequest request, Configuration conf)
    throws IOException {
  String remotePrincipal = request.getUserPrincipal().getName();
  String remoteShortName = request.getRemoteUser();
  if (remotePrincipal == null) { // This really shouldn't happen...
    LOG.warn("Received null remoteUser while authorizing access to " +
        "GetJournalEditServlet");
    return false;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("Validating request made by " + remotePrincipal +
        " / " + remoteShortName + ". This user is: " +
        UserGroupInformation.getLoginUser());
  }

  Set<String> validRequestors = new HashSet<String>();
  validRequestors.addAll(DFSUtil.getAllNnPrincipals(conf));
  validRequestors.add(
      SecurityUtil.getServerPrincipal(conf
          .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY),
          SecondaryNameNode.getHttpAddress(conf).getHostName()));

  // Check the full principal name of all the configured valid requestors.
  for (String v : validRequestors) {
    if (LOG.isDebugEnabled())
      LOG.debug("isValidRequestor is comparing to valid requestor: " + v);
    if (v != null && v.equals(remotePrincipal)) {
      if (LOG.isDebugEnabled())
        LOG.debug("isValidRequestor is allowing: " + remotePrincipal);
      return true;
    }
  }

  // Additionally, we compare the short name of the requestor to this JN's
  // username, because we want to allow requests from other JNs during
  // recovery, but we can't enumerate the full list of JNs.
  if (remoteShortName.equals(
      UserGroupInformation.getLoginUser().getShortUserName())) {
    if (LOG.isDebugEnabled())
      LOG.debug("isValidRequestor is allowing other JN principal: " +
          remotePrincipal);
    return true;
  }

  if (LOG.isDebugEnabled())
    LOG.debug("isValidRequestor is rejecting: " + remotePrincipal);
  return false;
}
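Example 5 appears to be an earlier revision of the same servlet method shown in Example 4: it reads the principal from the older DFS_SECONDARY_NAMENODE_USER_NAME_KEY constant rather than DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY, and it does not wrap the SecondaryNameNode principal lookup in a try/catch, so a failure to resolve the 2NN's HTTP address aborts the whole check instead of merely being logged.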
Example 6: main
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode; // import the required package/class
public static void main(String... args) throws Exception {
  // Launch every daemon of a pseudo-distributed cluster in one JVM:
  // NameNode, SecondaryNameNode, and DataNode for HDFS, then
  // ResourceManager and NodeManager for YARN.
  new AosProcessLauncher() {
    @Override
    public void process() throws Exception {
      NameNode.main(new String[] {});
    }
  }.launch();

  new AosProcessLauncher() {
    @Override
    public void process() throws Exception {
      SecondaryNameNode.main(new String[] {});
    }
  }.launch();

  new AosProcessLauncher() {
    @Override
    public void process() throws Exception {
      DataNode.main(new String[] {});
    }
  }.launch();

  new AosProcessLauncher() {
    @Override
    public void process() throws Exception {
      ResourceManager.main(new String[] {});
    }
  }.launch();

  new AosProcessLauncher() {
    @Override
    public void process() throws Exception {
      NodeManager.main(new String[] {});
    }
  }.launch();

  // Keep the launching JVM alive while the daemons run.
  while (true) {
    LOGGER.info("Sleeping...");
    TimeUnit.MINUTES.sleep(1);
  }
}
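Each daemon started this way reads its settings from the configuration files on the classpath (core-site.xml, hdfs-site.xml, yarn-site.xml), and the NameNode's storage directory must already be formatted (hdfs namenode -format) for startup to succeed; the endless sleep loop simply keeps the launching JVM alive.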