

Java HAUtil Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.HAUtil. If you have been wondering what the HAUtil class is for, how to use it, or what real-world usages look like, the curated code examples below should help.


The HAUtil class belongs to the org.apache.hadoop.hdfs package. A total of 15 code examples of the HAUtil class are shown below, sorted by popularity by default.
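
Before the examples, here is a quick orientation sketch showing the HAUtil calls that recur throughout them: resolving the local nameservice and NameNode IDs from the configuration and checking whether HA is enabled. This is only an illustrative sketch, not code from any of the projects below; the class name HAUtilQuickStart is invented here, and it assumes an hdfs-site.xml with dfs.nameservices and dfs.ha.namenodes.* settings is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HAUtilQuickStart {
  public static void main(String[] args) {
    // HdfsConfiguration loads hdfs-default.xml / hdfs-site.xml from the classpath.
    Configuration conf = new HdfsConfiguration();
    // The nameservice this configuration belongs to, e.g. "ns1" (depends on your config).
    String nsId = DFSUtil.getNamenodeNameServiceId(conf);
    if (HAUtil.isHAEnabled(conf, nsId)) {
      // The NameNode ID of the local node within that nameservice, e.g. "nn1".
      String nnId = HAUtil.getNameNodeId(conf, nsId);
      System.out.println("HA is enabled for " + nsId + "; local NameNode id: " + nnId);
    } else {
      System.out.println("HA is not enabled for nameservice " + nsId);
    }
  }
}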

Example 1: doRollback

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent.\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
 
Developer: naver, Project: hadoop, Lines of code: 24, Source file: NameNode.java

Example 2: rollingRollback

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 19, Source file: FSImage.java

Example 3: create

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Developer: naver, Project: hadoop, Lines of code: 22, Source file: DFSZKFailoverController.java
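
As a usage note for the create() factory above: Hadoop's zkfc entry point builds the failover controller this way and then hands control to ZKFailoverController.run(), which joins the ZooKeeper election loop and blocks until shutdown. The sketch below is illustrative only; the class name ZkfcLauncher is invented here, and it assumes the standard GenericOptionsParser and the run(String[]) method inherited from ZKFailoverController.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSZKFailoverController;
import org.apache.hadoop.util.GenericOptionsParser;

public class ZkfcLauncher {
  public static void main(String[] args) throws Exception {
    // Parse generic Hadoop options (-conf, -D, ...) before building the ZKFC.
    GenericOptionsParser parser =
        new GenericOptionsParser(new HdfsConfiguration(), args);
    Configuration conf = parser.getConfiguration();
    DFSZKFailoverController zkfc = DFSZKFailoverController.create(conf);
    // run() blocks while the controller participates in leader election;
    // its return value is used as the process exit code.
    System.exit(zkfc.run(parser.getRemainingArgs()));
  }
}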

Example 4: fetchImage

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
/**
 * Download the most recent fsimage from the name node, and save it to a local
 * file in the given directory.
 * 
 * @param argv
 *          List of command line parameters.
 * @param idx
 *          The index of the command that is being processed.
 * @return an exit code indicating success or failure.
 * @throws IOException
 */
public int fetchImage(final String[] argv, final int idx) throws IOException {
  Configuration conf = getConf();
  final URL infoServer = DFSUtil.getInfoServer(
      HAUtil.getAddressOfActive(getDFS()), conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
          new File(argv[idx]));
      return null;
    }
  });
  return 0;
}
 
Developer: naver, Project: hadoop, Lines of code: 27, Source file: DFSAdmin.java

Example 5: metaSave

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
 
Developer: naver, Project: hadoop, Lines of code: 33, Source file: DFSAdmin.java
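
The HA branch of metaSave above is a reusable pattern: resolve the logical HDFS URI to its nameservice, then fan the RPC out to every NameNode with HAUtil.getProxiesForAllNameNodesInNameservice. The sketch below applies the same pattern to a different ClientProtocol call, rollEdits(); it is illustrative only, the class name HAFanOutSketch is invented here, and the import paths follow the Hadoop 2.x layout used by these examples.

import java.io.IOException;
import java.net.URI;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class HAFanOutSketch {
  /** Ask every NameNode of an HA nameservice to roll its edit log. */
  public static void rollEditsOnAllNameNodes(DistributedFileSystem dfs)
      throws IOException {
    Configuration conf = dfs.getConf();
    URI uri = dfs.getUri();
    if (!HAUtil.isLogicalUri(conf, uri)) {
      return; // nothing to fan out for a non-HA (physical) URI
    }
    String nsId = uri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      // Same fan-out used by metaSave: issue the call against each NameNode.
      proxy.getProxy().rollEdits();
      System.out.println("Rolled edits on " + proxy.getAddress());
    }
  }
}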

Example 6: testWrappedFailoverProxyProvider

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // not to use IP address for token service
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
 
Developer: naver, Project: hadoop, Lines of code: 21, Source file: TestDFSClientFailover.java

Example 7: setupCluster

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();
  
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  cluster.transitionToActive(0);
}
 
Developer: naver, Project: hadoop, Lines of code: 20, Source file: TestXAttrsWithHA.java

Example 8: setupCluster

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  
  MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
  
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  cluster.waitActive();

  shutdownClusterAndRemoveSharedEditsDir();
}
 
Developer: naver, Project: hadoop, Lines of code: 18, Source file: TestInitializeSharedEdits.java

Example 9: setupCluster

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  HAUtil.setAllowStandbyReads(conf, true);
  
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .waitSafeMode(false)
    .build();
  cluster.waitActive();
  
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  cluster.transitionToActive(0);
}
 
Developer: naver, Project: hadoop, Lines of code: 22, Source file: TestQuotasWithHA.java

Example 10: setUpHaCluster

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
private void setUpHaCluster(boolean security) throws Exception {
  conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      security);
  cluster = new MiniQJMHACluster.Builder(conf).build();
  setHAConf(conf, cluster.getDfsCluster().getNameNode(0).getHostAndPort(),
      cluster.getDfsCluster().getNameNode(1).getHostAndPort());
  cluster.getDfsCluster().getNameNode(0).getHostAndPort();
  admin = new DFSAdmin();
  admin.setConf(conf);
  assertTrue(HAUtil.isHAEnabled(conf, "ns1"));

  originOut = System.out;
  originErr = System.err;
  System.setOut(new PrintStream(out));
  System.setErr(new PrintStream(err));
}
 
Developer: naver, Project: hadoop, Lines of code: 18, Source file: TestDFSAdminWithHA.java

Example 11: getRemoteNameNodes

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
public static List<RemoteNameNodeInfo> getRemoteNameNodes(Configuration conf, String nsId)
    throws IOException {
  // there is only a single NN configured (and no federation) so we don't have any more NNs
  if (nsId == null) {
    return Collections.emptyList();
  }
  List<Configuration> otherNodes = HAUtil.getConfForOtherNodes(conf);
  List<RemoteNameNodeInfo> nns = new ArrayList<RemoteNameNodeInfo>();

  for (Configuration otherNode : otherNodes) {
    String otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
    // don't do any validation here as in some cases, it can be overwritten later
    InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode, true);


    final String scheme = DFSUtil.getHttpClientScheme(conf);
    URL otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(otherIpcAddr.getHostName(),
        otherNode, scheme).toURL();

    nns.add(new RemoteNameNodeInfo(otherNode, otherNNId, otherIpcAddr, otherHttpAddr));
  }
  return nns;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 24, Source file: RemoteNameNodeInfo.java

Example 12: setNameNodeAddresses

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
/**
 * Determine the address of the NN we are checkpointing
 * as well as our own HTTP address from the configuration.
 * @throws IOException 
 */
private void setNameNodeAddresses(Configuration conf) throws IOException {
  // Look up our own address.
  myNNAddress = getHttpAddress(conf);

  // Look up the active node's address
  List<Configuration> confForActive = HAUtil.getConfForOtherNodes(conf);
  activeNNAddresses = new ArrayList<URL>(confForActive.size());
  for (Configuration activeConf : confForActive) {
    URL activeNNAddress = getHttpAddress(activeConf);

    // sanity check each possible active NN
    Preconditions.checkArgument(checkAddress(activeNNAddress),
        "Bad address for active NN: %s", activeNNAddress);

    activeNNAddresses.add(activeNNAddress);
  }

  // Sanity-check.
  Preconditions.checkArgument(checkAddress(myNNAddress), "Bad address for standby NN: %s",
      myNNAddress);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 27, Source file: StandbyCheckpointer.java

Example 13: rollingRollback

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  // HDFS-7939: purge all old fsimage_rollback_*
  archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_ROLLBACK);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 21, Source file: FSImage.java

Example 14: metaSave

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 33, Source file: DFSAdmin.java

Example 15: cloneHaNnCredentials

import org.apache.hadoop.hdfs.HAUtil; // import the required package/class
/**
 * Clones the delegation token to individual host behind the same logical address.
 *
 * @param config the hadoop configuration
 * @throws IOException if failed to get information for the current user.
 */
public static void cloneHaNnCredentials(Configuration config) throws IOException {
  String scheme = URI.create(config.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
                                        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT)).getScheme();

  // Loop through all name services. Each name service could have multiple name node associated with it.
  for (Map.Entry<String, Map<String, InetSocketAddress>> entry : DFSUtil.getHaNnRpcAddresses(config).entrySet()) {
    String nsId = entry.getKey();
    Map<String, InetSocketAddress> addressesInNN = entry.getValue();
    if (!HAUtil.isHAEnabled(config, nsId) || addressesInNN == null || addressesInNN.isEmpty()) {
      continue;
    }

    // The client may have a delegation token set for the logical
    // URI of the cluster. Clone this token to apply to each of the
    // underlying IPC addresses so that the IPC code can find it.
    URI uri = URI.create(scheme + "://" + nsId);

    LOG.info("Cloning delegation token for uri {}", uri);
    HAUtil.cloneDelegationTokenForLogicalUri(UserGroupInformation.getCurrentUser(), uri, addressesInNN.values());
  }
}
 
Developer: apache, Project: twill, Lines of code: 28, Source file: YarnUtils.java


Note: The org.apache.hadoop.hdfs.HAUtil class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For redistribution and use, please follow the license of the corresponding project. Do not reproduce without permission.