

Java HATestUtil.setFailoverConfigurations Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil.setFailoverConfigurations. If you are wondering how HATestUtil.setFailoverConfigurations is used in practice, the selected code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil.


The following shows 15 code examples of the HATestUtil.setFailoverConfigurations method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
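The examples below share a common pattern: a MiniDFSCluster is built with an HA NameNode topology, and setFailoverConfigurations is then called so that the client Configuration addresses a logical nameservice URI that fails over between the two NameNodes. As a rough orientation before the real examples, here is a minimal sketch of that pattern, assembled only from calls that appear in the examples; the class name SetFailoverConfigurationsSketch, the logical name "mycluster", and the test path are illustrative assumptions, not part of any of the source projects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;

public class SetFailoverConfigurationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Build a mini cluster with two NameNodes in an HA topology.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      cluster.transitionToActive(0);
      // Rewrite conf so clients address the logical nameservice "mycluster"
      // (illustrative name) and fail over between the two NameNodes.
      HATestUtil.setFailoverConfigurations(cluster, conf, "mycluster");
      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
      fs.mkdirs(new Path("/sketch")); // hypothetical path, just to exercise the FS
    } finally {
      cluster.shutdown();
    }
  }
}

Note that Examples 5, 11, and 12 below use overloads that take a list of host:port strings or explicit InetSocketAddress NameNode addresses instead of a running MiniDFSCluster.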

Example 1: testMultipleNamespacesConfigured

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: TestWebHDFSForHA.java

Example 2: testWrappedFailoverProxyProvider

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // not to use IP address for token service
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source file: TestDFSClientFailover.java

Example 3: testMoverCliWithHAConf

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
@Test
public void testMoverCliWithHAConf() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0).build();
  HATestUtil.setFailoverConfigurations(cluster, conf, "MyCluster");
  try {
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", "/foo", "/bar");
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertEquals(new URI("hdfs://MyCluster"), nn);
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: TestMover.java

Example 4: setup

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: TestNameNodeRetryCacheMetrics.java

Example 5: initHAConf

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
private Configuration initHAConf(URI journalURI, Configuration conf, int numNNs) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());

  List<String> nns = new ArrayList<String>(numNNs);
  int port = basePort;
  for (int i = 0; i < numNNs; i++) {
    nns.add("127.0.0.1:" + port);
    // increment by 2 each time to account for the http port in the config setting
    port += 2;
  }

  // use standard failover configurations
  HATestUtil.setFailoverConfigurations(conf, NAMESERVICE, nns);
  return conf;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 17, Source file: MiniQJMHACluster.java

Example 6: testHarUriWithHaUriWithNoPort

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
/**
 * Test that the HarFileSystem works with underlying HDFS URIs that have no
 * port specified, as is often the case with an HA setup.
 */
@Test
public void testHarUriWithHaUriWithNoPort() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .build();
    cluster.transitionToActive(0);
    HATestUtil.setFailoverConfigurations(cluster, conf);
    
    createEmptyHarArchive(HATestUtil.configureFailoverFs(cluster, conf),
        TEST_HAR_PATH);
    
    URI failoverUri = FileSystem.getDefaultUri(conf);
    Path p = new Path("har://hdfs-" + failoverUri.getAuthority() + TEST_HAR_PATH);
    p.getFileSystem(conf);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 27, Source file: TestHarFileSystemWithHA.java

Example 7: testHA

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
@Test
public void testHA() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(0);

    final Path dir = new Path("/test");
    Assert.assertTrue(fs.mkdirs(dir));

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);

    final Path dir2 = new Path("/test2");
    Assert.assertTrue(fs.mkdirs(dir2));
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 32, Source file: TestWebHDFSForHA.java

Example 8: testSecureHAToken

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source file: TestWebHDFSForHA.java

Example 9: testFailoverAfterOpen

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
      "://" + LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(1);

    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);

    out.write(data);
    out.close();
    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source file: TestWebHDFSForHA.java

Example 10: testLogicalUriShouldNotHavePorts

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
/**
 * Regression test for HDFS-2683.
 */
@Test
public void testLogicalUriShouldNotHavePorts() {
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  Path p = new Path("hdfs://" + logicalName + ":12345/");
  try {
    p.getFileSystem(config).exists(p);
    fail("Did not fail with fake FS");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "does not use port information", ioe);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: TestDFSClientFailover.java

Example 11: testFormatShouldBeIgnoredForNonFileBasedDirs

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
/**
 * Test to skip format for non file scheme directory configured
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify that the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(DFS_BASE_DIR, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert is added to verify the working of test
  NameNode.format(conf);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 32, Source file: TestAllowFormat.java

Example 12: testFormatShouldBeIgnoredForNonFileBasedDirs

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
/**
 * Test to skip format for non file scheme directory configured
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  String logicalName = "mycluster";

  // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify that the NameNode
  // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
  // considered.
  String localhost = "127.0.0.1";
  InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
  InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
  HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);

  conf.set(DFS_NAMENODE_NAME_DIR_KEY,
      new File(hdfsDir, "name").getAbsolutePath());
  conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  conf.set(DFSUtil.addKeySuffixes(
      DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
      DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
      + localhost + ":2181/ledgers");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");

  // An internal assert is added to verify the working of test
  NameNode.format(conf);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 32, Source file: TestAllowFormat.java

Example 13: testFailoverAfterOpen

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(1);

    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);

    out.write(data);
    out.close();
    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines of code: 37, Source file: TestWebHDFSForHA.java

Example 14: testRetryWhileNNStartup

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
/**
 * Make sure the WebHdfsFileSystem will retry based on RetriableException when
 * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
 */
@Test (timeout=120000)
public void testRetryWhileNNStartup() throws Exception {
  final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();
    cluster.transitionToActive(0);

    final NameNode namenode = cluster.getNameNode(0);
    final NamenodeProtocols rpcServer = namenode.getRpcServer();
    Whitebox.setInternalState(namenode, "rpcServer", null);

    new Thread() {
      @Override
      public void run() {
        boolean result = false;
        FileSystem fs = null;
        try {
          fs = FileSystem.get(WEBHDFS_URI, conf);
          final Path dir = new Path("/test");
          result = fs.mkdirs(dir);
        } catch (IOException e) {
          result = false;
        } finally {
          IOUtils.cleanup(null, fs);
        }
        synchronized (TestWebHDFSForHA.this) {
          resultMap.put("mkdirs", result);
          TestWebHDFSForHA.this.notifyAll();
        }
      }
    }.start();

    Thread.sleep(1000);
    Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
    synchronized (this) {
      while (!resultMap.containsKey("mkdirs")) {
        this.wait();
      }
      Assert.assertTrue(resultMap.get("mkdirs"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 57, Source file: TestWebHDFSForHA.java

Example 15: testBalancerWithHANameNodes

import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; // import the package/class this method depends on
/**
 * Test a cluster with even distribution, after which a new empty node is
 * added. The test starts a cluster with the specified number of nodes and
 * fills it to 30% full (with a single file replicated identically to all
 * datanodes); it then adds one new empty node and starts balancing.
 */
@Test(timeout = 60000)
public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  long newNodeCapacity = TestBalancer.CAPACITY; // new node's capacity
  String newNodeRack = TestBalancer.RACK2; // new node's rack
  // array of racks for original nodes in cluster
  String[] racks = new String[] { TestBalancer.RACK0, TestBalancer.RACK1 };
  // array of capacities of original nodes in cluster
  long[] capacities = new long[] { TestBalancer.CAPACITY,
      TestBalancer.CAPACITY };
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath, totalUsedSpace
        / numOfDatanodes, (short) numOfDatanodes, 1);

    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
        new long[] { newNodeCapacity });
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
        cluster);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
        cluster, Balancer.Parameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 59, Source file: TestBalancerWithHANameNodes.java


Note: The org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil.setFailoverConfigurations method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to the License of the corresponding project before distributing or reusing the code; do not reproduce this article without permission.