

Java HAUtil.getNameNodeId Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.HAUtil.getNameNodeId. If you are wondering what HAUtil.getNameNodeId does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore other usage examples of org.apache.hadoop.hdfs.HAUtil.


Below are 15 code examples of the HAUtil.getNameNodeId method, ordered by popularity.
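Before the examples, here is a minimal sketch (ours, not taken from any of the projects below) of the pattern nearly all of them share: resolve the nameservice ID from the configuration, resolve the local NameNode ID within that nameservice, then initialize the per-NameNode generic keys. The class name GetNameNodeIdDemo is hypothetical, and the Configuration is assumed to carry HA settings (dfs.nameservices, dfs.ha.namenodes.<nsId>) from an hdfs-site.xml on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class GetNameNodeIdDemo {  // hypothetical class name, for illustration only
  public static void main(String[] args) {
    // Assumes an HA-enabled HDFS configuration is on the classpath.
    Configuration conf = new Configuration();
    // The nameservice this node belongs to, e.g. "mycluster".
    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
    // The ID of the local NameNode within that nameservice, e.g. "nn1";
    // may be null if this host cannot be matched to a configured NameNode.
    String nnId = HAUtil.getNameNodeId(conf, nsId);
    System.out.println("nameservice=" + nsId + ", namenode=" + nnId);
    // Most callers then collapse nsId/nnId-suffixed keys into their generic form:
    NameNode.initializeGenericKeys(conf, nsId, nnId);
  }
}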

Example 1: doRollback
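From NameNode.java: resolves the nameservice and NameNode IDs so that initializeGenericKeys can collapse the per-node configuration keys, then rolls the file system back to its pre-upgrade state. The method returns true if the user aborts at the confirmation prompt and false once the rollback completes.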

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent.\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: NameNode.java

Example 2: create
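From DFSZKFailoverController.java: resolves the local NameNode ID before constructing the ZKFC failover target. Note the explicit null check, since getNameNodeId returns null when the local host cannot be matched to any configured NameNode (for example, when zkfc is started on a non-NameNode host).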

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Developer ID: naver, Project: hadoop, Lines: 22, Source: DFSZKFailoverController.java

Example 3: getRemoteNameNodes
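From RemoteNameNodeInfo.java: calls getNameNodeId once per remote NameNode, passing each remote node's Configuration (as returned by HAUtil.getConfForOtherNodes) rather than the local one, to build the list of other NameNodes in the nameservice.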

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
public static List<RemoteNameNodeInfo> getRemoteNameNodes(Configuration conf, String nsId)
    throws IOException {
  // there is only a single NN configured (and no federation) so we don't have any more NNs
  if (nsId == null) {
    return Collections.emptyList();
  }
  List<Configuration> otherNodes = HAUtil.getConfForOtherNodes(conf);
  List<RemoteNameNodeInfo> nns = new ArrayList<RemoteNameNodeInfo>();

  for (Configuration otherNode : otherNodes) {
    String otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
    // don't do any validation here as in some cases, it can be overwritten later
    InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode, true);


    final String scheme = DFSUtil.getHttpClientScheme(conf);
    URL otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(otherIpcAddr.getHostName(),
        otherNode, scheme).toURL();

    nns.add(new RemoteNameNodeInfo(otherNode, otherNNId, otherIpcAddr, otherHttpAddr));
  }
  return nns;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: RemoteNameNodeInfo.java

Example 4: finalize
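The finalize counterpart to Example 1: the same nsId/nnId resolution precedes finalizing a completed upgrade, after which the rollback option is no longer available.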

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
private static boolean finalize(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"finalize\" will remove the previous state of the files system.\n"
      + "Recent upgrade will become permanent.\n"
      + "Rollback option will not be available anymore.\n");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Finalize filesystem state?")) {
      System.err.println("Finalize aborted.");
      return true;
    }
  }
  nsys.dir.fsImage.finalizeUpgrade();
  return false;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 22, Source: NameNode.java

Example 5: create
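An earlier variant of Example 2 from the hadoop-plus fork; it omits the null check on the ID returned by getNameNodeId.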

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 17, Source: DFSZKFailoverController.java

Example 6: parseConfAndFindOtherNN
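From BootstrapStandby.java: getNameNodeId is called twice, once with the local configuration to identify this node and once with the other node's configuration (via HAUtil.getConfForOtherNode) to identify the NameNode to bootstrap from.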

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();

  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 34, Source: BootstrapStandby.java

Example 7: printMetadataVersion
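Resolves the IDs only to initialize the generic configuration keys before reading the on-disk image and printing the software and image metadata versions.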

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
/**
 * Verify that configured directories exist, then print the metadata versions
 * of the software and the image.
 *
 * @param conf configuration to use
 * @throws IOException
 */
private static boolean printMetadataVersion(Configuration conf)
  throws IOException {
  final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  final String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, namenodeId);
  final FSImage fsImage = new FSImage(conf);
  final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
  return fsImage.recoverTransitionRead(
    StartupOption.METADATAVERSION, fs, null);
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: NameNode.java

Example 8: parseConfAndFindOtherNN
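An older variant of Example 6; it resolves the remote HTTP address with getInfoServer plus wildcard substitution instead of getInfoServerWithDefaultHost.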

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
  otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
      otherIpcAddr.getHostName());
  
  
  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 35, Source: BootstrapStandby.java

Example 9: createBlockTokenSecretManager
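From BlockManager.java: in an HA pair, the local and remote NameNode IDs are compared lexicographically to assign each NameNode a distinct serial-number index (0 or 1) for its BlockTokenSecretManager.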

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
private static BlockTokenSecretManager createBlockTokenSecretManager(
    final Configuration conf) {
  final boolean isEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);

  if (!isEnabled) {
    return null;
  }

  final long updateMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
  final long lifetimeMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
  final String encryptionAlgorithm = conf.get(
      DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
      + "=" + updateMin + " min(s), "
      + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
      + "=" + lifetimeMin + " min(s), "
      + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
      + "=" + encryptionAlgorithm);
  
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

  if (isHaEnabled) {
    String thisNnId = HAUtil.getNameNodeId(conf, nsId);
    String otherNnId = HAUtil.getNameNodeIdOfOtherNode(conf, nsId);
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, thisNnId.compareTo(otherNnId) < 0 ? 0 : 1, null,
        encryptionAlgorithm);
  } else {
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 41, Source: BlockManager.java

Example 10: format
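From NameNode.java: the resolved IDs feed initializeGenericKeys before any storage directory is checked or formatted.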

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
/**
 * Verify that configured directories exist, then interactively confirm
 * that formatting is desired for each existing directory, and format them.
 * 
 * @param conf configuration to use
 * @param force if true, format regardless of whether dirs exist
 * @return true if formatting was aborted, false otherwise
 * @throws IOException
 */
private static boolean format(Configuration conf, boolean force,
    boolean isInteractive) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);
  checkAllowFormat(conf);

  if (UserGroupInformation.isSecurityEnabled()) {
    InetSocketAddress socAddr = getAddress(conf);
    SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
        DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  }
  
  Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
  List<URI> dirsToPrompt = new ArrayList<URI>();
  dirsToPrompt.addAll(nameDirsToFormat);
  dirsToPrompt.addAll(sharedDirs);
  List<URI> editDirsToFormat = 
               FSNamesystem.getNamespaceEditsDirs(conf);

  // if clusterID is not provided - see if you can find the current one
  String clusterId = StartupOption.FORMAT.getClusterId();
  if(clusterId == null || clusterId.equals("")) {
    //Generate a new cluster id
    clusterId = NNStorage.newClusterID();
  }
  System.out.println("Formatting using clusterid: " + clusterId);
  
  FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
  try {
    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
    fsImage.getEditLog().initJournalsForWrite();

    if (!fsImage.confirmFormat(force, isInteractive)) {
      return true; // aborted
    }

    fsImage.format(fsn, clusterId);
  } catch (IOException ioe) {
    LOG.warn("Encountered exception during format: ", ioe);
    fsImage.close();
    throw ioe;
  }
  return false;
}
 
Developer ID: naver, Project: hadoop, Lines: 57, Source: NameNode.java

Example 11: createBlockTokenSecretManager
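Like Example 9, with one addition: if block access tokens are disabled while Kerberos security is enabled, an error is logged before returning null.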

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
private static BlockTokenSecretManager createBlockTokenSecretManager(
    final Configuration conf) {
  final boolean isEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);

  if (!isEnabled) {
    if (UserGroupInformation.isSecurityEnabled()) {
      LOG.error("Security is enabled but block access tokens " +
          "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
          "aren't enabled. This may cause issues " +
          "when clients attempt to talk to a DataNode.");
    }
    return null;
  }

  final long updateMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
  final long lifetimeMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
  final String encryptionAlgorithm = conf.get(
      DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
      + "=" + updateMin + " min(s), "
      + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
      + "=" + lifetimeMin + " min(s), "
      + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
      + "=" + encryptionAlgorithm);
  
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

  if (isHaEnabled) {
    String thisNnId = HAUtil.getNameNodeId(conf, nsId);
    String otherNnId = HAUtil.getNameNodeIdOfOtherNode(conf, nsId);
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, thisNnId.compareTo(otherNnId) < 0 ? 0 : 1, null,
        encryptionAlgorithm);
  } else {
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 47, Source: BlockManager.java

Example 12: parseConfAndFindOtherNN
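A multi-standby variant of Example 6: rather than a single other node, it validates the full list returned by RemoteNameNodeInfo.getRemoteNameNodes (Example 3) and requires at least one valid remote NameNode.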

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }


  remoteNNs = RemoteNameNodeInfo.getRemoteNameNodes(conf, nsId);
  // validate the configured NNs
  List<RemoteNameNodeInfo> remove = new ArrayList<RemoteNameNodeInfo>(remoteNNs.size());
  for (RemoteNameNodeInfo info : remoteNNs) {
    InetSocketAddress address = info.getIpcAddress();
    LOG.info("Found nn: " + info.getNameNodeID() + ", ipc: " + info.getIpcAddress());
    if (address.getPort() == 0 || address.getAddress().isAnyLocalAddress()) {
      LOG.error("Could not determine valid IPC address for other NameNode ("
          + info.getNameNodeID() + ") , got: " + address);
      remove.add(info);
    }
  }

  // remove any invalid nns
  remoteNNs.removeAll(remove);

  // make sure we have at least one left to read
  Preconditions.checkArgument(!remoteNNs.isEmpty(), "Could not find any valid namenodes!");

  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 42, Source: BootstrapStandby.java

Example 13: format
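A newer variant of Example 10 that obtains the NameNode address through DFSUtilClient.getNNAddress for the security login.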

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
/**
 * Verify that configured directories exist, then interactively confirm
 * that formatting is desired for each existing directory, and format them.
 * 
 * @param conf configuration to use
 * @param force if true, format regardless of whether dirs exist
 * @return true if formatting was aborted, false otherwise
 * @throws IOException
 */
private static boolean format(Configuration conf, boolean force,
    boolean isInteractive) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);
  checkAllowFormat(conf);

  if (UserGroupInformation.isSecurityEnabled()) {
    InetSocketAddress socAddr = DFSUtilClient.getNNAddress(conf);
    SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
        DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  }
  
  Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
  List<URI> dirsToPrompt = new ArrayList<URI>();
  dirsToPrompt.addAll(nameDirsToFormat);
  dirsToPrompt.addAll(sharedDirs);
  List<URI> editDirsToFormat = 
               FSNamesystem.getNamespaceEditsDirs(conf);

  // if clusterID is not provided - see if you can find the current one
  String clusterId = StartupOption.FORMAT.getClusterId();
  if(clusterId == null || clusterId.equals("")) {
    //Generate a new cluster id
    clusterId = NNStorage.newClusterID();
  }
  System.out.println("Formatting using clusterid: " + clusterId);
  
  FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
  try {
    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
    fsImage.getEditLog().initJournalsForWrite();

    if (!fsImage.confirmFormat(force, isInteractive)) {
      return true; // aborted
    }

    fsImage.format(fsn, clusterId);
  } catch (IOException ioe) {
    LOG.warn("Encountered exception during format: ", ioe);
    fsImage.close();
    throw ioe;
  }
  return false;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 57, Source: NameNode.java

Example 14: createBlockTokenSecretManager
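A multi-NameNode variant of Example 9: the local ID's position within DFSUtilClient.getNameNodeIds determines the token serial index, and a security misconfiguration now aborts with an IOException instead of just logging an error.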

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
private static BlockTokenSecretManager createBlockTokenSecretManager(
    final Configuration conf) throws IOException {
  final boolean isEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);

  if (!isEnabled) {
    if (UserGroupInformation.isSecurityEnabled()) {
      String errMessage = "Security is enabled but block access tokens " +
          "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
          "aren't enabled. This may cause issues " +
          "when clients attempt to connect to a DataNode. Aborting NameNode";
      throw new IOException(errMessage);
    }
    return null;
  }

  final long updateMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
  final long lifetimeMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
  final String encryptionAlgorithm = conf.get(
      DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
      + "=" + updateMin + " min(s), "
      + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
      + "=" + lifetimeMin + " min(s), "
      + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
      + "=" + encryptionAlgorithm);
  
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

  if (isHaEnabled) {
    // figure out which index we are of the nns
    Collection<String> nnIds = DFSUtilClient.getNameNodeIds(conf, nsId);
    String nnId = HAUtil.getNameNodeId(conf, nsId);
    int nnIndex = 0;
    for (String id : nnIds) {
      if (id.equals(nnId)) {
        break;
      }
      nnIndex++;
    }
    return new BlockTokenSecretManager(updateMin * 60 * 1000L,
        lifetimeMin * 60 * 1000L, nnIndex, nnIds.size(), null, encryptionAlgorithm);
  } else {
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, 0, 1, null, encryptionAlgorithm);
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 55, Source: BlockManager.java

Example 15: createBlockTokenSecretManager
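Effectively the same as Example 11, taken from a CDH 5.4.3 fork.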

import org.apache.hadoop.hdfs.HAUtil; // import the class the method depends on
private static BlockTokenSecretManager createBlockTokenSecretManager(
    final Configuration conf) {
  final boolean isEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled);

  if (!isEnabled) {
    if (UserGroupInformation.isSecurityEnabled()) {
     LOG.error("Security is enabled but block access tokens " +
      "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
      "aren't enabled. This may cause issues " +
      "when clients attempt to talk to a DataNode.");
    }
    return null;
  }

  final long updateMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
  final long lifetimeMin = conf.getLong(
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, 
      DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
  final String encryptionAlgorithm = conf.get(
      DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY
      + "=" + updateMin + " min(s), "
      + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY
      + "=" + lifetimeMin + " min(s), "
      + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY
      + "=" + encryptionAlgorithm);
  
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);

  if (isHaEnabled) {
    String thisNnId = HAUtil.getNameNodeId(conf, nsId);
    String otherNnId = HAUtil.getNameNodeIdOfOtherNode(conf, nsId);
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, thisNnId.compareTo(otherNnId) < 0 ? 0 : 1, null,
        encryptionAlgorithm);
  } else {
    return new BlockTokenSecretManager(updateMin*60*1000L,
        lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 47, Source: BlockManager.java


Note: the org.apache.hadoop.hdfs.HAUtil.getNameNodeId examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects contributed by various developers, and copyright in the source code remains with the original authors; consult each project's license before redistributing or reusing the code. Do not repost without permission.