

Java DFSTestUtil.newHAConfiguration Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.newHAConfiguration. If you are wondering what DFSTestUtil.newHAConfiguration does, how to call it, or what it looks like in real code, the selected examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


The following shows 6 code examples of DFSTestUtil.newHAConfiguration, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
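All of the examples below come from Hadoop's TestWebHDFSForHA and TestParameterParser test classes and rely on class-level fixtures (LOGICAL_NAME, topo, WEBHDFS_URI) that are not shown in the snippets. The following is a minimal sketch of what those fixtures typically look like; the field values and the two-NameNode topology are assumptions based on the common Hadoop HA test setup, not code copied from the original file.

import java.net.URI;

import org.apache.hadoop.hdfs.MiniDFSNNTopology;

public class TestWebHDFSForHA {
  // Logical nameservice name passed to DFSTestUtil.newHAConfiguration(...)
  private static final String LOGICAL_NAME = "minidfs";

  // webhdfs:// URI pointing at the logical (HA) nameservice rather than a single NameNode
  private static final URI WEBHDFS_URI = URI.create("webhdfs://" + LOGICAL_NAME);

  // Two-NameNode topology for the MiniDFSCluster (assumed layout; the real test may differ)
  private static final MiniDFSNNTopology topo = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf(LOGICAL_NAME)
          .addNN(new MiniDFSNNTopology.NNConf("nn1"))
          .addNN(new MiniDFSNNTopology.NNConf("nn2")));

  // ... test methods such as the examples below ...
}

With fixtures like these, DFSTestUtil.newHAConfiguration(LOGICAL_NAME) produces a Configuration pre-wired for the logical HA nameservice, which each test then binds to a MiniDFSCluster via HATestUtil.setFailoverConfigurations.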

Example 1: testMultipleNamespacesConfigured

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: TestWebHDFSForHA.java

Example 2: testHA

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testHA() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(0);

    final Path dir = new Path("/test");
    Assert.assertTrue(fs.mkdirs(dir));

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);

    final Path dir2 = new Path("/test2");
    Assert.assertTrue(fs.mkdirs(dir2));
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 32, Source file: TestWebHDFSForHA.java

Example 3: testSecureHAToken

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source file: TestWebHDFSForHA.java

Example 4: testFailoverAfterOpen

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
      "://" + LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(1);

    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);

    out.write(data);
    out.close();
    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source file: TestWebHDFSForHA.java

Example 5: testDeserializeHAToken

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test
public void testDeserializeHAToken() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  final Token<DelegationTokenIdentifier> token = new
      Token<DelegationTokenIdentifier>();
  QueryStringDecoder decoder = new QueryStringDecoder(
    WebHdfsHandler.WEBHDFS_PREFIX + "/?"
    + NamenodeAddressParam.NAME + "=" + LOGICAL_NAME + "&"
    + DelegationParam.NAME + "=" + token.encodeToUrlString());
  ParameterParser testParser = new ParameterParser(decoder, conf);
  final Token<DelegationTokenIdentifier> tok2 = testParser.delegationToken();
  Assert.assertTrue(HAUtil.isTokenForLogicalUri(tok2));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source file: TestParameterParser.java

Example 6: testRetryWhileNNStartup

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * Make sure the WebHdfsFileSystem will retry based on RetriableException when
 * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
 */
@Test (timeout=120000)
public void testRetryWhileNNStartup() throws Exception {
  final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();
    cluster.transitionToActive(0);

    final NameNode namenode = cluster.getNameNode(0);
    final NamenodeProtocols rpcServer = namenode.getRpcServer();
    Whitebox.setInternalState(namenode, "rpcServer", null);

    new Thread() {
      @Override
      public void run() {
        boolean result = false;
        FileSystem fs = null;
        try {
          fs = FileSystem.get(WEBHDFS_URI, conf);
          final Path dir = new Path("/test");
          result = fs.mkdirs(dir);
        } catch (IOException e) {
          result = false;
        } finally {
          IOUtils.cleanup(null, fs);
        }
        synchronized (TestWebHDFSForHA.this) {
          resultMap.put("mkdirs", result);
          TestWebHDFSForHA.this.notifyAll();
        }
      }
    }.start();

    Thread.sleep(1000);
    Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
    synchronized (this) {
      while (!resultMap.containsKey("mkdirs")) {
        this.wait();
      }
      Assert.assertTrue(resultMap.get("mkdirs"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 57, Source file: TestWebHDFSForHA.java


Note: The org.apache.hadoop.hdfs.DFSTestUtil.newHAConfiguration method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Please consult each project's license before redistributing or reusing the code, and do not republish this article without permission.