

Java MiniDFSCluster.getConfiguration Method Code Examples

This article collects typical code examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.getConfiguration. If you are wondering what exactly MiniDFSCluster.getConfiguration does, how to use it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.MiniDFSCluster.


A total of 7 code examples of the MiniDFSCluster.getConfiguration method are shown below, sorted by popularity by default.
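Before the examples, here is a minimal, self-contained sketch of the typical pattern: start an in-process MiniDFSCluster, retrieve the running NameNode's live configuration with getConfiguration(0), and use that configuration to create a client FileSystem. This is only an illustrative outline under assumed settings (a single DataNode and the arbitrary test path "/example"); it is not taken from any of the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsGetConfigurationSketch {
  public static void main(String[] args) throws Exception {
    // Start an in-process mini HDFS cluster with a single DataNode.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      // getConfiguration(0) returns the configuration of NameNode 0,
      // including the ports and fs.defaultFS chosen at startup.
      Configuration conf = cluster.getConfiguration(0);
      // The returned configuration can be used like any HDFS client configuration.
      FileSystem fs = FileSystem.get(conf);
      fs.mkdirs(new Path("/example")); // arbitrary test path (assumption)
    } finally {
      cluster.shutdown();
    }
  }
}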

Example 1: evaluate

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Override
public void evaluate() throws Throwable {
  MiniDFSCluster miniHdfs = null;
  Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
  if (Boolean.parseBoolean(System.getProperty(HADOOP_MINI_HDFS, "true"))) {
    miniHdfs = startMiniHdfs(conf);
    conf = miniHdfs.getConfiguration(0);
  }
  try {
    HDFS_CONF_TL.set(conf);
    HDFS_TEST_DIR_TL.set(resetHdfsTestDir(conf));
    statement.evaluate();
  } finally {
    HDFS_CONF_TL.remove();
    HDFS_TEST_DIR_TL.remove();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: TestHdfsHelper.java

Example 2: testTokenStoreHdfs

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testTokenStoreHdfs() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  conf = cluster.getConfiguration(0);
  try {
    testTokenStore("/tmp/historystore");
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 11, Source file: TestHistoryServerFileSystemStateStoreService.java

Example 3: testMoverCli

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Test Mover Cli by specifying a list of files/directories using option "-p".
 * There is only one namenode (and hence name service) specified in the conf.
 */
@Test
public void testMoverCli() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration()).numDataNodes(0).build();
  try {
    final Configuration conf = cluster.getConfiguration(0);
    try {
      Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "bar");
      Assert.fail("Expected exception for illegal path bar");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("bar is not absolute", e);
    }

    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    Assert.assertNull(movePaths.get(nn));

    movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
    namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, movePaths.size());
    nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 36, Source file: TestMover.java

Example 4: testMultipleSecondaryNamenodes

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Starts two namenodes and two secondary namenodes, verifies that secondary
 * namenodes are configured correctly to talk to their respective namenodes
 * and can do the checkpoint.
 * 
 * @throws IOException
 */
@Test
public void testMultipleSecondaryNamenodes() throws IOException {
  Configuration conf = new HdfsConfiguration();
  String nameserviceId1 = "ns1";
  String nameserviceId2 = "ns2";
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, nameserviceId1
      + "," + nameserviceId2);
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary1 = null;
  SecondaryNameNode secondary2 = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
            conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
        .build();
    Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
    Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
    InetSocketAddress nn1RpcAddress = cluster.getNameNode(0)
        .getNameNodeAddress();
    InetSocketAddress nn2RpcAddress = cluster.getNameNode(1)
        .getNameNodeAddress();
    String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
    String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();

    // Set the Service Rpc address to empty to make sure the node specific
    // setting works
    snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
    snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");

    // Set the nameserviceIds
    snConf1.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1),
        nn1);
    snConf2.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2),
        nn2);

    secondary1 = startSecondaryNameNode(snConf1);
    secondary2 = startSecondaryNameNode(snConf2);

    // make sure the two secondary namenodes are talking to correct namenodes.
    assertEquals(secondary1.getNameNodeAddress().getPort(),
        nn1RpcAddress.getPort());
    assertEquals(secondary2.getNameNodeAddress().getPort(),
        nn2RpcAddress.getPort());
    assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2
        .getNameNodeAddress().getPort());

    // both should checkpoint.
    secondary1.doCheckpoint();
    secondary2.doCheckpoint();
  } finally {
    cleanup(secondary1);
    secondary1 = null;
    cleanup(secondary2);
    secondary2 = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 68, Source file: TestCheckpoint.java

Example 5: testDfsAdminDeleteBlockPool

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testDfsAdminDeleteBlockPool() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
        "namesServerId1,namesServerId2");
    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
          conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
      .numDataNodes(1).build();

    cluster.waitActive();

    FileSystem fs1 = cluster.getFileSystem(0);
    FileSystem fs2 = cluster.getFileSystem(1);

    DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 1, 54);
    DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 1, 54);

    DataNode dn1 = cluster.getDataNodes().get(0);

    String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
    String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
    
    File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
    File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
    
    Configuration nn1Conf = cluster.getConfiguration(0);
    nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
    dn1.refreshNamenodes(nn1Conf);
    assertEquals(1, dn1.getAllBpOs().length);
    
    DFSAdmin admin = new DFSAdmin(nn1Conf);
    String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
    String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
    
    int ret = admin.run(args);
    assertFalse(0 == ret);

    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
    
    String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
    ret = admin.run(forceArgs);
    assertEquals(0, ret);
    
    verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
    
    //bpid1 remains good
    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
    
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 61, Source file: TestDeleteBlockPool.java

Example 6: testDNWithInvalidStorageWithHA

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testDNWithInvalidStorageWithHA() throws Exception {
  MiniDFSNNTopology top = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
      .addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1"))
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));

  top.setFederation(true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(top)
      .numDataNodes(0).build();
  try {
    cluster.startDataNodes(conf, 1, true, null, null);
    // let the initialization be complete
    Thread.sleep(10000);
    DataNode dn = cluster.getDataNodes().get(0);
    assertTrue("Datanode should be running", dn.isDatanodeUp());
    assertEquals("BPOfferService should be running", 1,
        dn.getAllBpOs().length);
    DataNodeProperties dnProp = cluster.stopDataNode(0);

    cluster.getNameNode(0).stop();
    cluster.getNameNode(1).stop();
    Configuration nn1 = cluster.getConfiguration(0);
    Configuration nn2 = cluster.getConfiguration(1);
    // setting up invalid cluster
    StartupOption.FORMAT.setClusterId("cluster-2");
    DFSTestUtil.formatNameNode(nn1);
    MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),
        FSNamesystem.getNamespaceDirs(nn2), nn2);
    cluster.restartNameNode(0, false);
    cluster.restartNameNode(1, false);
    cluster.restartDataNode(dnProp);
    
    // let the initialization be complete
    Thread.sleep(10000);
    dn = cluster.getDataNodes().get(0);
    assertFalse("Datanode should have shutdown as only service failed",
        dn.isDatanodeUp());
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 44, Source file: TestDataNodeMultipleRegistrations.java

Example 7: testInitializeBKSharedEdits

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Use NameNode INITIALIZESHAREDEDITS to initialize the shared edits, i.e. copy
 * the edit log segments to the new BKJM shared edits directory.
 * 
 * @throws Exception
 */
@Test
public void testInitializeBKSharedEdits() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    HAUtil.setAllowStandbyReads(conf, true);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);

    MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
        .numDataNodes(0).build();
    cluster.waitActive();
    // Shutdown and clear the current filebased shared dir.
    cluster.shutdownNameNodes();
    File shareddir = new File(cluster.getSharedEditsDir(0, 1));
    assertTrue("Initial Shared edits dir not fully deleted",
        FileUtil.fullyDelete(shareddir));

    // Check namenodes should not start without shared dir.
    assertCanNotStartNamenode(cluster, 0);
    assertCanNotStartNamenode(cluster, 1);

    // Configure bkjm as new shared edits dir in both namenodes
    Configuration nn1Conf = cluster.getConfiguration(0);
    Configuration nn2Conf = cluster.getConfiguration(1);
    nn1Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
        .createJournalURI("/initializeSharedEdits").toString());
    nn2Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
        .createJournalURI("/initializeSharedEdits").toString());
    BKJMUtil.addJournalManagerDefinition(nn1Conf);
    BKJMUtil.addJournalManagerDefinition(nn2Conf);

    // Initialize the BKJM shared edits.
    assertFalse(NameNode.initializeSharedEdits(nn1Conf));

    // NameNode should be able to start and should be in sync with BKJM as
    // shared dir
    assertCanStartHANameNodes(cluster, conf, "/testBKJMInitialize");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 51, Source file: TestBookKeeperAsHASharedDir.java


Note: The org.apache.hadoop.hdfs.MiniDFSCluster.getConfiguration examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code. Do not reproduce without permission.