

Java MiniDFSCluster.waitClusterUp Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.waitClusterUp. If you are unsure what MiniDFSCluster.waitClusterUp does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.MiniDFSCluster.


Eight code examples of MiniDFSCluster.waitClusterUp are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
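Before diving in, here is a minimal, self-contained sketch of the pattern every example below shares: build a MiniDFSCluster, call waitClusterUp() to block until the in-process HDFS cluster is ready to serve requests, do some work, and shut the cluster down in a finally block. The class name WaitClusterUpSketch and the /demo path are illustrative placeholders, not taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WaitClusterUpSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    // Build a single-process HDFS cluster (one NameNode, one DataNode by default).
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      // Block until the mini cluster reports itself up; requests made earlier may fail.
      cluster.waitClusterUp();
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/demo"));
      System.out.println("cluster up, /demo exists: " + fs.exists(new Path("/demo")));
    } finally {
      cluster.shutdown();
    }
  }
}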

Example 1: testAuditLogger

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Tests that AuditLogger works as expected.
 */
@Test
public void testAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    assertEquals(1, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 24 | Source: TestAuditLogger.java

Example 2: testDisableTopAuditLogger

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Tests that TopAuditLogger can be disabled
 */
@Test
public void testDisableTopAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(NNTOP_ENABLED_KEY, false);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    List<AuditLogger> auditLoggers =
        cluster.getNameNode().getNamesystem().getAuditLoggers();
    for (AuditLogger auditLogger : auditLoggers) {
      assertFalse(
          "top audit logger is still hooked in after it is disabled",
          auditLogger instanceof TopAuditLogger);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 23 | Source: TestAuditLogger.java

Example 3: testAuditLoggerWithSetPermission

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Minor test related to HADOOP-9155. Verify that during a
 * FileSystem.setPermission() operation, the stat passed in during the
 * logAuditEvent() call returns the new permission rather than the old
 * permission.
 */
@Test
public void testAuditLoggerWithSetPermission() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    final Path p = new Path("/");
    fs.setTimes(p, time, time);
    fs.setPermission(p, new FsPermission(TEST_PERMISSION));
    assertEquals(TEST_PERMISSION, DummyAuditLogger.foundPermission);
    assertEquals(2, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 30 | Source: TestAuditLogger.java

Example 4: testBrokenLogger

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Tests that a broken audit logger causes requests to fail.
 */
@Test
public void testBrokenLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      BrokenAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    fail("Expected exception due to broken audit logger.");
  } catch (RemoteException re) {
    // Expected.
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 24 | Source: TestAuditLogger.java
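Examples 1 through 4 reference DummyAuditLogger and BrokenAuditLogger helpers that are not part of this excerpt; the real ones live in TestAuditLogger.java. Based only on the members the tests touch (initialized, logCount, foundPermission, resetLogCount), a DummyAuditLogger might look like the hedged sketch below, declared as a static nested class inside the test; a BrokenAuditLogger would presumably throw from logAuditEvent so that audited requests fail, as Example 4 expects.

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

// Hypothetical reconstruction; the actual helper is in TestAuditLogger.java.
static class DummyAuditLogger implements AuditLogger {
  static boolean initialized;
  static int logCount;
  static short foundPermission;

  @Override
  public void initialize(Configuration conf) {
    initialized = true;
  }

  @Override
  public void logAuditEvent(boolean succeeded, String userName,
      InetAddress addr, String cmd, String src, String dst,
      FileStatus stat) {
    logCount++;
    if (stat != null) {
      // Remember the permission the logger saw, so tests can assert on it.
      foundPermission = stat.getPermission().toShort();
    }
  }

  static void resetLogCount() {
    logCount = 0;
  }
}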

Example 5: testFsckMissingReplicas

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Tests that the numbers of missing and expected block replicas are reported correctly
 * @throws IOException
 */
@Test
public void testFsckMissingReplicas() throws IOException {
  // Desired replication factor
  // Set this higher than NUM_REPLICAS so it's under-replicated
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Start up a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Get the namenode and network topology for the fsck run
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_REPLICAS, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // Also print the output from the fsck, for ex post facto sanity checks
    System.out.println(result.toString());
    assertEquals(res.missingReplicas, 
        (NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS));
    assertEquals(res.numExpectedReplicas, NUM_BLOCKS*REPL_FACTOR);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 69 | Source: TestFsck.java
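A quick check of the arithmetic in Example 5: with NUM_BLOCKS = 3, REPL_FACTOR = 2, and NUM_REPLICAS = 1, the assertions work out to 3 * 2 = 6 expected replicas in total, of which 6 - 3 * 1 = 3 are missing, since only one datanode is running.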

Example 6: testFsckMisPlacedReplicas

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Tests that the number of misplaced replicas is reported correctly
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  String [] racks = {"/rack1", "/rack1"};
  String [] hosts = {"host1", "host2"};
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Start up a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Get the namenode and network topology for the fsck run
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on a different rack, so the previous blocks' replicas
    // are considered misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;
    
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_DN, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // check misReplicatedBlock number.
    assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 74 | Source: TestFsck.java
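The expected count in Example 6 follows from the setup: all NUM_BLOCKS = 3 blocks were written while both datanodes sat on /rack1, so once a /rack2 node joins the topology every block violates the rack-placement policy, and the test expects numMisReplicatedBlocks to equal NUM_BLOCKS.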

Example 7: testBlockIdCK

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Test for blockIdCK
 */
@Test
public void testBlockIdCK() throws Exception {

  final short REPL_FACTOR = 2;
  short NUM_DN = 2;
  final long blockSize = 512;

  String [] racks = {"/rack1", "/rack2"};
  String [] hosts = {"host1", "host2"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
      .racks(racks).build();

  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);

  DFSTestUtil util = new DFSTestUtil.Builder().
    setName(getClass().getSimpleName()).setNumFiles(1).build();
  //create files
  final String pathString = "/testfile";
  final Path path = new Path(pathString);
  util.createFile(dfs, path, 1024, REPL_FACTOR, 1000L);
  util.waitReplication(dfs, path, REPL_FACTOR);
  StringBuilder sb = new StringBuilder();
  for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
    sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
  }
  String[] bIds = sb.toString().split(" ");

  //run fsck
  try {
    //illegal input test
    String runFsckResult = runFsck(conf, 0, true, "/", "-blockId",
        "not_a_block_id");
    assertTrue(runFsckResult.contains("Incorrect blockId format:"));

    //general test
    runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
    assertTrue(runFsckResult.contains(bIds[0]));
    assertTrue(runFsckResult.contains(bIds[1]));
    assertTrue(runFsckResult.contains(
        "Block replica on datanode/rack: host1/rack1 is HEALTHY"));
    assertTrue(runFsckResult.contains(
        "Block replica on datanode/rack: host2/rack2 is HEALTHY"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 62 | Source: TestFsck.java
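Examples 7 and 8 call a runFsck helper that is not included in this excerpt; the real one lives in TestFsck.java. As a hedged sketch, such a helper (declared inside the test class) would run the DFSck tool, the implementation behind the hdfs fsck command, with the given arguments, optionally assert on the exit code, and return the captured output.

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

import static org.junit.Assert.assertEquals;

// Hypothetical reconstruction; the actual helper is in TestFsck.java.
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    // Callers pass 0 for a healthy run and 1 when corruption is expected.
    assertEquals(expectedErrCode, errCode);
  }
  return bStream.toString();
}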

Example 8: testBlockIdCKCorruption

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the class that provides the method
/**
 * Test for blockIdCK with block corruption
 */
@Test
public void testBlockIdCKCorruption() throws Exception {
  short NUM_DN = 1;
  final long blockSize = 512;
  Random random = new Random();
  ExtendedBlock block;
  short repFactor = 1;
  String [] racks = {"/rack1"};
  String [] hosts = {"host1"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  try {
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
            .racks(racks).build();

    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);

    DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
    //create files
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, repFactor, 1000L);
    util.waitReplication(dfs, path, repFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
      sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
    }
    String[] bIds = sb.toString().split(" ");

    //make sure block is healthy before we corrupt it
    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));

    // corrupt replicas
    block = DFSTestUtil.getFirstBlock(dfs, path);
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
      // Overwrite bytes at a random offset in the first half of the block file;
      // try-with-resources ensures the file is closed even if a write fails.
      try (RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw")) {
        FileChannel channel = raFile.getChannel();
        String badString = "BADBAD";
        int rand = random.nextInt((int) channel.size() / 2);
        raFile.seek(rand);
        raFile.write(badString.getBytes());
      }
    }

    util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);

    outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 77 | Source: TestFsck.java


Note: The org.apache.hadoop.hdfs.MiniDFSCluster.waitClusterUp method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not reproduce this article without permission.