

Java HAUtil.isHAEnabled Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.HAUtil.isHAEnabled. If you have been wondering exactly what HAUtil.isHAEnabled does, how to call it, or where to find real-world examples of it, the hand-picked code samples below should help. You can also explore further usage examples from the enclosing class, org.apache.hadoop.hdfs.HAUtil.


The following presents 15 code examples of the HAUtil.isHAEnabled method, drawn from open-source projects and sorted by popularity by default.
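
Before turning to the project examples, here is a minimal, self-contained sketch of a typical call. It is not taken from any of the projects below: the nameservice ID "mycluster", the NameNode IDs "nn1"/"nn2", and the host names are hypothetical placeholders, while the configuration keys are the standard HDFS HA keys.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HAUtil;

public class HaEnabledCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical HA setup: one nameservice ("mycluster") with two NameNodes.
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn2.example.com:8020");

    // isHAEnabled returns true when the given nameservice has more than one
    // NameNode configured -- the same condition the examples below branch on.
    boolean haEnabled = HAUtil.isHAEnabled(conf, "mycluster");
    System.out.println("HA enabled for mycluster: " + haEnabled);
  }
}

With only a single NameNode configured for the nameservice, the same call returns false.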

Example 1: rollingRollback

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 19, Source: FSImage.java

Example 2: create

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Developer: naver, Project: hadoop, Lines of code: 22, Source: DFSZKFailoverController.java

Example 3: rollingRollback

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  // HDFS-7939: purge all old fsimage_rollback_*
  archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_ROLLBACK);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 21, Source: FSImage.java

Example 4: cloneHaNnCredentials

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
/**
 * Clones the delegation token to the individual hosts behind the same logical address.
 *
 * @param config the hadoop configuration
 * @throws IOException if it fails to get information about the current user.
 */
public static void cloneHaNnCredentials(Configuration config) throws IOException {
  String scheme = URI.create(config.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
                                        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT)).getScheme();

// Loop through all name services. Each name service can have multiple name nodes associated with it.
  for (Map.Entry<String, Map<String, InetSocketAddress>> entry : DFSUtil.getHaNnRpcAddresses(config).entrySet()) {
    String nsId = entry.getKey();
    Map<String, InetSocketAddress> addressesInNN = entry.getValue();
    if (!HAUtil.isHAEnabled(config, nsId) || addressesInNN == null || addressesInNN.isEmpty()) {
      continue;
    }

    // The client may have a delegation token set for the logical
    // URI of the cluster. Clone this token to apply to each of the
    // underlying IPC addresses so that the IPC code can find it.
    URI uri = URI.create(scheme + "://" + nsId);

    LOG.info("Cloning delegation token for uri {}", uri);
    HAUtil.cloneDelegationTokenForLogicalUri(UserGroupInformation.getCurrentUser(), uri, addressesInNN.values());
  }
}
 
Developer: apache, Project: twill, Lines of code: 28, Source: YarnUtils.java

Example 5: create

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 17, Source: DFSZKFailoverController.java

Example 6: parseConfAndFindOtherNN

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();

  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Developer: naver, Project: hadoop, Lines of code: 34, Source: BootstrapStandby.java

Example 7: initEditLog

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
public void initEditLog(StartupOption startOpt) throws IOException {
  Preconditions.checkState(getNamespaceID() != 0,
      "Must know namespace ID before initting edit log");
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(conf, nameserviceId)) {
    // If this NN is not HA
    editLog.initJournalsForWrite();
    editLog.recoverUnclosedStreams();
  } else if (HAUtil.isHAEnabled(conf, nameserviceId)
      && (startOpt == StartupOption.UPGRADE
          || startOpt == StartupOption.UPGRADEONLY
          || RollingUpgradeStartupOption.ROLLBACK.matches(startOpt))) {
    // This NN is HA, but we're doing an upgrade or a rollback of rolling
    // upgrade so init the edit log for write.
    editLog.initJournalsForWrite();
    if (startOpt == StartupOption.UPGRADE
        || startOpt == StartupOption.UPGRADEONLY) {
      long sharedLogCTime = editLog.getSharedLogCTime();
      if (this.storage.getCTime() < sharedLogCTime) {
        throw new IOException("It looks like the shared log is already " +
            "being upgraded but this NN has not been upgraded yet. You " +
            "should restart this NameNode with the '" +
            StartupOption.BOOTSTRAPSTANDBY.getName() + "' option to bring " +
            "this NN in sync with the other.");
      }
    }
    editLog.recoverUnclosedStreams();
  } else {
    // This NN is HA and we're not doing an upgrade.
    editLog.initSharedJournalsForRead();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 33, Source: FSImage.java

Example 8: parseConfAndFindOtherNN

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
  otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
      otherIpcAddr.getHostName());
  
  
  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 35, Source: BootstrapStandby.java

Example 9: initEditLog

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
public void initEditLog() {
  Preconditions.checkState(getNamespaceID() != 0,
      "Must know namespace ID before initting edit log");
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(conf, nameserviceId)) {
    editLog.initJournalsForWrite();
    editLog.recoverUnclosedStreams();
  } else {
    editLog.initSharedJournalsForRead();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 12, Source: FSImage.java

Example 10: createBlockTokenSecretManager

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
private static BlockTokenSecretManager createBlockTokenSecretManager(
    final Configuration conf) {
  final boolean isEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);

  if (!isEnabled) {
    return null;
  }

  final long updateMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
  final long lifetimeMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
  final String encryptionAlgorithm = conf.get(
      DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
      + "=" + updateMin + " min(s), "
      + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
      + "=" + lifetimeMin + " min(s), "
      + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
      + "=" + encryptionAlgorithm);
  
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

  if (isHaEnabled) {
    String thisNnId = HAUtil.getNameNodeId(conf, nsId);
    String otherNnId = HAUtil.getNameNodeIdOfOtherNode(conf, nsId);
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, thisNnId.compareTo(otherNnId) < 0 ? 0 : 1, null,
        encryptionAlgorithm);
  } else {
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 41, Source: BlockManager.java

Example 11: isValidRequestor

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
@VisibleForTesting
static boolean isValidRequestor(ServletContext context, String remoteUser,
    Configuration conf) throws IOException {
  if (remoteUser == null) { // This really shouldn't happen...
    LOG.warn("Received null remoteUser while authorizing access to getImage servlet");
    return false;
  }

  Set<String> validRequestors = new HashSet<String>();

  validRequestors.add(SecurityUtil.getServerPrincipal(conf
      .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
      NameNode.getAddress(conf).getHostName()));
  try {
    validRequestors.add(
        SecurityUtil.getServerPrincipal(conf
            .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
            SecondaryNameNode.getHttpAddress(conf).getHostName()));
  } catch (Exception e) {
    // Don't halt if SecondaryNameNode principal could not be added.
    LOG.debug("SecondaryNameNode principal could not be added", e);
    String msg = String.format(
      "SecondaryNameNode principal not considered, %s = %s, %s = %s",
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
      conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
    LOG.warn(msg);
  }

  if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
    Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
    validRequestors.add(SecurityUtil.getServerPrincipal(otherNnConf
        .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
        NameNode.getAddress(otherNnConf).getHostName()));
  }

  for (String v : validRequestors) {
    if (v != null && v.equals(remoteUser)) {
      LOG.info("ImageServlet allowing checkpointer: " + remoteUser);
      return true;
    }
  }

  if (HttpServer2.userHasAdministratorAccess(context, remoteUser)) {
    LOG.info("ImageServlet allowing administrator: " + remoteUser);
    return true;
  }

  LOG.info("ImageServlet rejecting: " + remoteUser);
  return false;
}
 
Developer: naver, Project: hadoop, Lines of code: 54, Source: ImageServlet.java

Example 12: createBlockTokenSecretManager

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
private static BlockTokenSecretManager createBlockTokenSecretManager(
    final Configuration conf) {
  final boolean isEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);

  if (!isEnabled) {
    if (UserGroupInformation.isSecurityEnabled()) {
      LOG.error("Security is enabled but block access tokens " +
          "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
          "aren't enabled. This may cause issues " +
          "when clients attempt to talk to a DataNode.");
    }
    return null;
  }

  final long updateMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
  final long lifetimeMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
  final String encryptionAlgorithm = conf.get(
      DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
      + "=" + updateMin + " min(s), "
      + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
      + "=" + lifetimeMin + " min(s), "
      + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
      + "=" + encryptionAlgorithm);
  
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

  if (isHaEnabled) {
    String thisNnId = HAUtil.getNameNodeId(conf, nsId);
    String otherNnId = HAUtil.getNameNodeIdOfOtherNode(conf, nsId);
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, thisNnId.compareTo(otherNnId) < 0 ? 0 : 1, null,
        encryptionAlgorithm);
  } else {
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 47, Source: BlockManager.java

Example 13: parseConfAndFindOtherNN

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }


  remoteNNs = RemoteNameNodeInfo.getRemoteNameNodes(conf, nsId);
  // validate the configured NNs
  List<RemoteNameNodeInfo> remove = new ArrayList<RemoteNameNodeInfo>(remoteNNs.size());
  for (RemoteNameNodeInfo info : remoteNNs) {
    InetSocketAddress address = info.getIpcAddress();
    LOG.info("Found nn: " + info.getNameNodeID() + ", ipc: " + info.getIpcAddress());
    if (address.getPort() == 0 || address.getAddress().isAnyLocalAddress()) {
      LOG.error("Could not determine valid IPC address for other NameNode ("
          + info.getNameNodeID() + ") , got: " + address);
      remove.add(info);
    }
  }

  // remove any invalid nns
  remoteNNs.removeAll(remove);

  // make sure we have at least one left to read
  Preconditions.checkArgument(!remoteNNs.isEmpty(), "Could not find any valid namenodes!");

  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 42, Source: BootstrapStandby.java

Example 14: isValidRequestor

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
@VisibleForTesting
static boolean isValidRequestor(ServletContext context, String remoteUser,
    Configuration conf) throws IOException {
  if (remoteUser == null) { // This really shouldn't happen...
    LOG.warn("Received null remoteUser while authorizing access to getImage servlet");
    return false;
  }

  Set<String> validRequestors = new HashSet<String>();

  validRequestors.add(SecurityUtil.getServerPrincipal(conf
      .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
      DFSUtilClient.getNNAddress(conf).getHostName()));
  try {
    validRequestors.add(
        SecurityUtil.getServerPrincipal(conf
            .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
            SecondaryNameNode.getHttpAddress(conf).getHostName()));
  } catch (Exception e) {
    // Don't halt if SecondaryNameNode principal could not be added.
    LOG.debug("SecondaryNameNode principal could not be added", e);
    String msg = String.format(
      "SecondaryNameNode principal not considered, %s = %s, %s = %s",
      DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
      conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
      DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
    LOG.warn(msg);
  }

  if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
    List<Configuration> otherNnConfs = HAUtil.getConfForOtherNodes(conf);
    for (Configuration otherNnConf : otherNnConfs) {
      validRequestors.add(SecurityUtil.getServerPrincipal(otherNnConf
              .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
          DFSUtilClient.getNNAddress(otherNnConf).getHostName()));
    }
  }

  for (String v : validRequestors) {
    if (v != null && v.equals(remoteUser)) {
      LOG.info("ImageServlet allowing checkpointer: " + remoteUser);
      return true;
    }
  }

  if (HttpServer2.userHasAdministratorAccess(context, remoteUser)) {
    LOG.info("ImageServlet allowing administrator: " + remoteUser);
    return true;
  }

  LOG.info("ImageServlet rejecting: " + remoteUser);
  return false;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 56, Source: ImageServlet.java

Example 15: createBlockTokenSecretManager

import org.apache.hadoop.hdfs.HAUtil; // import the package/class the method depends on
private static BlockTokenSecretManager createBlockTokenSecretManager(
    final Configuration conf) throws IOException {
  final boolean isEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);

  if (!isEnabled) {
    if (UserGroupInformation.isSecurityEnabled()) {
      String errMessage = "Security is enabled but block access tokens " +
          "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
          "aren't enabled. This may cause issues " +
          "when clients attempt to connect to a DataNode. Aborting NameNode";
      throw new IOException(errMessage);
    }
    return null;
  }

  final long updateMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
  final long lifetimeMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
  final String encryptionAlgorithm = conf.get(
      DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
      + "=" + updateMin + " min(s), "
      + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
      + "=" + lifetimeMin + " min(s), "
      + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
      + "=" + encryptionAlgorithm);
  
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

  if (isHaEnabled) {
    // figure out which index we are of the nns
    Collection<String> nnIds = DFSUtilClient.getNameNodeIds(conf, nsId);
    String nnId = HAUtil.getNameNodeId(conf, nsId);
    int nnIndex = 0;
    for (String id : nnIds) {
      if (id.equals(nnId)) {
        break;
      }
      nnIndex++;
    }
    return new BlockTokenSecretManager(updateMin * 60 * 1000L,
        lifetimeMin * 60 * 1000L, nnIndex, nnIds.size(), null, encryptionAlgorithm);
  } else {
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, 0, 1, null, encryptionAlgorithm);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 55, Source: BlockManager.java


Note: The org.apache.hadoop.hdfs.HAUtil.isHAEnabled examples in this article were collected from open-source projects hosted on GitHub and similar platforms. The copyright of each snippet belongs to its original author; consult the corresponding project's license before redistributing or reusing the code.