

Java HAUtil.getConfForOtherNode Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.HAUtil.getConfForOtherNode. If you are unsure how to call HAUtil.getConfForOtherNode in practice, or are simply looking for concrete examples, the curated snippets below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.HAUtil.


Eight code examples of the HAUtil.getConfForOtherNode method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
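Before the curated examples, a minimal sketch of the typical call pattern may help. The snippet below is hypothetical (it is not drawn from any of the projects listed) and assumes the Hadoop 2.x form of the API, where getConfForOtherNode takes the local node's Configuration and returns a Configuration whose generic keys point at the single peer NameNode. The nameservice name "mycluster", the NameNode IDs nn1/nn2, the host names, and the class name OtherNodeConfDemo are illustrative placeholders.

// Hypothetical sketch: configure a two-NameNode HA nameservice, declare which
// NameNode "we" are, then ask HAUtil for a Configuration describing the other node.
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.net.NetUtils;

public class OtherNodeConfDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // HA nameservice "mycluster" with two NameNodes: nn1 (local) and nn2 (peer).
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "mycluster");
    conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".mycluster", "nn1,nn2");
    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".mycluster.nn1",
        "host1.example.com:8020");
    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".mycluster.nn2",
        "host2.example.com:8020");
    conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1"); // this process is nn1

    // Returns a copy of conf whose generic keys (e.g. dfs.namenode.rpc-address)
    // have been rewritten to point at the other NameNode, nn2.
    Configuration otherNodeConf = HAUtil.getConfForOtherNode(conf);
    InetSocketAddress otherRpcAddr = NetUtils.createSocketAddr(
        otherNodeConf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
    System.out.println("Other NameNode RPC address: " + otherRpcAddr);
  }
}

The examples that follow use the same pattern inside StandbyCheckpointer, BootstrapStandby, EditLogTailer, and the image servlets: the returned Configuration is handed to helpers such as NameNode.getServiceAddress or SecurityUtil.getServerPrincipal to locate or authenticate the peer NameNode. In later Hadoop releases this method was superseded by getConfForOtherNodes, which returns a list of configurations to support more than two NameNodes per nameservice.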

Example 1: setNameNodeAddresses

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
/**
 * Determine the address of the NN we are checkpointing
 * as well as our own HTTP address from the configuration.
 * @throws IOException 
 */
private void setNameNodeAddresses(Configuration conf) throws IOException {
  // Look up our own address.
  String myAddrString = getHttpAddress(conf);

  // Look up the active node's address
  Configuration confForActive = HAUtil.getConfForOtherNode(conf);
  activeNNAddress = getHttpAddress(confForActive);
  
  // Sanity-check.
  Preconditions.checkArgument(checkAddress(activeNNAddress),
      "Bad address for active NN: %s", activeNNAddress);
  Preconditions.checkArgument(checkAddress(myAddrString),
      "Bad address for standby NN: %s", myAddrString);
  myNNAddress = NetUtils.createSocketAddr(myAddrString);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 21, Source file: StandbyCheckpointer.java

Example 2: setNameNodeAddresses

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
/**
 * Determine the address of the NN we are checkpointing
 * as well as our own HTTP address from the configuration.
 * @throws IOException 
 */
private void setNameNodeAddresses(Configuration conf) throws IOException {
  // Look up our own address.
  myNNAddress = getHttpAddress(conf);

  // Look up the active node's address
  Configuration confForActive = HAUtil.getConfForOtherNode(conf);
  activeNNAddress = getHttpAddress(confForActive);
  
  // Sanity-check.
  Preconditions.checkArgument(checkAddress(activeNNAddress),
      "Bad address for active NN: %s", activeNNAddress);
  Preconditions.checkArgument(checkAddress(myNNAddress),
      "Bad address for standby NN: %s", myNNAddress);
}
 
Developer: naver, Project: hadoop, Lines of code: 20, Source file: StandbyCheckpointer.java

Example 3: parseConfAndFindOtherNN

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();

  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Developer: naver, Project: hadoop, Lines of code: 34, Source file: BootstrapStandby.java

Example 4: parseConfAndFindOtherNN

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
  otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
      otherIpcAddr.getHostName());

  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 35, Source file: BootstrapStandby.java

Example 5: getActiveNodeAddress

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
private InetSocketAddress getActiveNodeAddress() {
  Configuration activeConf = HAUtil.getConfForOtherNode(conf);
  return NameNode.getServiceAddress(activeConf, true);
}
 
Developer: naver, Project: hadoop, Lines of code: 5, Source file: EditLogTailer.java

Example 6: isValidRequestor

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
@VisibleForTesting
static boolean isValidRequestor(ServletContext context, String remoteUser,
    Configuration conf) throws IOException {
  if (remoteUser == null) { // This really shouldn't happen...
    LOG.warn("Received null remoteUser while authorizing access to getImage servlet");
    return false;
  }

  Set<String> validRequestors = new HashSet<String>();

  validRequestors.add(SecurityUtil.getServerPrincipal(conf
      .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
      NameNode.getAddress(conf).getHostName()));
  try {
    validRequestors.add(
        SecurityUtil.getServerPrincipal(conf
            .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
            SecondaryNameNode.getHttpAddress(conf).getHostName()));
  } catch (Exception e) {
    // Don't halt if SecondaryNameNode principal could not be added.
    LOG.debug("SecondaryNameNode principal could not be added", e);
    String msg = String.format(
      "SecondaryNameNode principal not considered, %s = %s, %s = %s",
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
      conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
    LOG.warn(msg);
  }

  if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
    Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
    validRequestors.add(SecurityUtil.getServerPrincipal(otherNnConf
        .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
        NameNode.getAddress(otherNnConf).getHostName()));
  }

  for (String v : validRequestors) {
    if (v != null && v.equals(remoteUser)) {
      LOG.info("ImageServlet allowing checkpointer: " + remoteUser);
      return true;
    }
  }

  if (HttpServer2.userHasAdministratorAccess(context, remoteUser)) {
    LOG.info("ImageServlet allowing administrator: " + remoteUser);
    return true;
  }

  LOG.info("ImageServlet rejecting: " + remoteUser);
  return false;
}
 
Developer: naver, Project: hadoop, Lines of code: 54, Source file: ImageServlet.java

Example 7: isValidRequestor

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
@VisibleForTesting
static boolean isValidRequestor(ServletContext context, String remoteUser,
    Configuration conf) throws IOException {
  if (remoteUser == null) { // This really shouldn't happen...
    LOG.warn("Received null remoteUser while authorizing access to getImage servlet");
    return false;
  }

  Set<String> validRequestors = new HashSet<String>();

  validRequestors.add(SecurityUtil.getServerPrincipal(conf
      .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
      NameNode.getAddress(conf).getHostName()));
  try {
    validRequestors.add(
        SecurityUtil.getServerPrincipal(conf
            .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
            SecondaryNameNode.getHttpAddress(conf).getHostName()));
  } catch (Exception e) {
    // Don't halt if SecondaryNameNode principal could not be added.
    LOG.debug("SecondaryNameNode principal could not be added", e);
    String msg = String.format(
      "SecondaryNameNode principal not considered, %s = %s, %s = %s",
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
      conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
    LOG.warn(msg);
  }

  if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
    Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
    validRequestors.add(SecurityUtil.getServerPrincipal(otherNnConf
        .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
        NameNode.getAddress(otherNnConf).getHostName()));
  }

  for (String v : validRequestors) {
    if (v != null && v.equals(remoteUser)) {
      LOG.info("ImageServlet allowing checkpointer: " + remoteUser);
      return true;
    }
  }

  if (HttpServer2.userHasAdministratorAccess(context, remoteUser)) {
    LOG.info("ImageServlet allowing administrator: " + remoteUser);
    return true;
  }

  LOG.info("ImageServlet rejecting: " + remoteUser);
  return false;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 54, Source file: ImageServlet.java

Example 8: isValidRequestor

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
@VisibleForTesting
static boolean isValidRequestor(ServletContext context, String remoteUser,
    Configuration conf) throws IOException {
  if(remoteUser == null) { // This really shouldn't happen...
    LOG.warn("Received null remoteUser while authorizing access to getImage servlet");
    return false;
  }
  
  Set<String> validRequestors = new HashSet<String>();

  validRequestors.add(
      SecurityUtil.getServerPrincipal(conf
          .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), NameNode
          .getAddress(conf).getHostName()));
  validRequestors.add(
      SecurityUtil.getServerPrincipal(conf
          .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY),
          SecondaryNameNode.getHttpAddress(conf).getHostName()));

  if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
    Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
    validRequestors.add(
        SecurityUtil.getServerPrincipal(otherNnConf
            .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
            NameNode.getAddress(otherNnConf).getHostName()));
  }

  for(String v : validRequestors) {
    if(v != null && v.equals(remoteUser)) {
      LOG.info("GetImageServlet allowing checkpointer: " + remoteUser);
      return true;
    }
  }
  
  if (HttpServer.userHasAdministratorAccess(context, remoteUser)) {
    LOG.info("GetImageServlet allowing administrator: " + remoteUser);
    return true;
  }
  
  LOG.info("GetImageServlet rejecting: " + remoteUser);
  return false;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 43, Source file: GetImageServlet.java


Note: The org.apache.hadoop.hdfs.HAUtil.getConfForOtherNode examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not republish without permission.