

Java NameNode.initializeGenericKeys Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.NameNode.initializeGenericKeys. If you are wondering what NameNode.initializeGenericKeys does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore other usages of org.apache.hadoop.hdfs.server.namenode.NameNode.


Five code examples of the NameNode.initializeGenericKeys method are shown below, ordered by popularity.
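All five examples follow the same underlying pattern: resolve the nameservice ID and the namenode ID of the local node, then pass them to NameNode.initializeGenericKeys so that the per-namenode configuration keys (those suffixed with the nameservice and namenode IDs) are copied onto their generic, suffix-free names. The sketch below distills that pattern from the examples; the class name InitializeGenericKeysSketch and the method resolveLocalNameNodeConf are illustrative placeholders, not Hadoop APIs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class InitializeGenericKeysSketch {
  // Hypothetical helper: resolve this node's nameservice and namenode IDs,
  // then let initializeGenericKeys copy keys such as
  // dfs.namenode.rpc-address.<nsId>.<nnId> onto their generic names.
  public static Configuration resolveLocalNameNodeConf(Configuration conf) {
    String nsId = DFSUtil.getNamenodeNameServiceId(conf); // may be null outside federation
    String nnId = HAUtil.getNameNodeId(conf, nsId);       // may be null outside HA
    NameNode.initializeGenericKeys(conf, nsId, nnId);
    return conf;
  }
}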

Example 1: create

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the package/class the method depends on
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: DFSZKFailoverController.java

Example 2: testGetOtherNNHttpAddress

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the package/class the method depends on
@Test
public void testGetOtherNNHttpAddress() throws IOException {
  // Use non-local addresses to avoid host address matching
  Configuration conf = getHAConf("ns1", "1.2.3.1", "1.2.3.2");
  conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "ns1");

  // This is done by the NN before the StandbyCheckpointer is created
  NameNode.initializeGenericKeys(conf, "ns1", "nn1");

  // Since we didn't configure the HTTP address, and the default is
  // 0.0.0.0, it should substitute the address from the RPC configuration
  // above.
  StandbyCheckpointer checkpointer = new StandbyCheckpointer(conf, fsn);
  assertEquals(new URL("http", "1.2.3.2",
      DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, ""),
      checkpointer.getActiveNNAddress());
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestHAConfiguration.java

Example 3: parseConfAndFindOtherNN

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the package/class the method depends on
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();

  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: BootstrapStandby.java

Example 4: getConfForOtherNode

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the package/class the method depends on
/**
 * Given the configuration for this node, return a Configuration object for
 * the other node in an HA setup.
 * 
 * @param myConf the configuration of this node
 * @return the configuration of the other node in an HA setup
 */
public static Configuration getConfForOtherNode(
    Configuration myConf) {
  
  String nsId = DFSUtil.getNamenodeNameServiceId(myConf);
  String otherNn = getNameNodeIdOfOtherNode(myConf, nsId);
  
  // Look up the address of the active NN.
  Configuration confForOtherNode = new Configuration(myConf);
  NameNode.initializeGenericKeys(confForOtherNode, nsId, otherNn);
  return confForOtherNode;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: HAUtil.java

Example 5: testConfModificationNoFederationOrHa

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the package/class the method depends on
/**
 * Ensure that fs.defaultFS is set in the configuration even if neither HA nor
 * Federation is enabled.
 * 
 * Regression test for HDFS-3351.
 */
@Test
public void testConfModificationNoFederationOrHa() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  String nsId = null;
  String nnId = null;
  
  conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");

  assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));
  NameNode.initializeGenericKeys(conf, nsId, nnId);
  assertEquals("hdfs://localhost:1234", conf.get(FS_DEFAULT_NAME_KEY));
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestDFSUtil.java


Note: The org.apache.hadoop.hdfs.server.namenode.NameNode.initializeGenericKeys examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult each project's license before using or redistributing the code, and do not republish without permission.