

Java DFSTestUtil.getFirstBlock Method Code Examples

This article collects representative usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.getFirstBlock. If you are wondering what DFSTestUtil.getFirstBlock does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


The following presents 15 code examples of DFSTestUtil.getFirstBlock, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java examples.
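
All 15 examples share the same basic pattern: create a file on a MiniDFSCluster, wait for its replicas, then resolve the file's first block to an ExtendedBlock for block-level assertions. The standalone sketch below distills that pattern; it is illustrative only, and the cluster setup and the /sketch path are assumptions rather than part of any example that follows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class GetFirstBlockSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      Path path = new Path("/sketch"); // hypothetical test path
      // Write a small file and wait until all replicas are reported.
      DFSTestUtil.createFile(fs, path, 1024L, (short) 3, 0L);
      DFSTestUtil.waitReplication(fs, path, (short) 3);
      // Resolve the file's first block; the ExtendedBlock carries the
      // block pool id plus the local block (id, genstamp, length).
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
      System.out.println(block.getBlockPoolId() + " " + block.getBlockName());
    } finally {
      cluster.shutdown();
    }
  }
}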

Example 1: testVerifyBlockChecksumCommand

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test(timeout = 60000)
public void testVerifyBlockChecksumCommand() throws Exception {
  DFSTestUtil.createFile(fs, new Path("/bar"), 1234, (short) 1, 0xdeadbeef);
  FsDatasetSpi<?> fsd = datanode.getFSDataset();
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/bar"));
  File blockFile = getBlockFile(fsd,
      block.getBlockPoolId(), block.getLocalBlock());
  assertEquals("ret: 1, You must specify a meta file with -meta",
      runCmd(new String[]{"verify", "-block", blockFile.getAbsolutePath()}));
  File metaFile = getMetaFile(fsd,
      block.getBlockPoolId(), block.getLocalBlock());
  assertEquals("ret: 0, Checksum type: " +
        "DataChecksum(type=CRC32C, chunkSize=512)",
      runCmd(new String[]{"verify",
          "-meta", metaFile.getAbsolutePath()}));
  assertEquals("ret: 0, Checksum type: " +
        "DataChecksum(type=CRC32C, chunkSize=512)" +
        "Checksum verification succeeded on block file " +
        blockFile.getAbsolutePath(),
      runCmd(new String[]{"verify",
          "-meta", metaFile.getAbsolutePath(),
          "-block", blockFile.getAbsolutePath()})
  );
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestDebugAdmin.java
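
Example 1 relies on private helpers of TestDebugAdmin that the snippet does not show: getBlockFile, getMetaFile, and runCmd. The sketch below is a plausible reconstruction of runCmd, not verified against this exact Hadoop revision. It also explains the expected strings above: the captured output has its line separators stripped and the exit code prepended as "ret: N, ", which is why the checksum-type line and the verification message run together with no separator.

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

// Plausible sketch of TestDebugAdmin#runCmd (assumed, not authoritative):
// run the DebugAdmin tool while capturing System.out and System.err,
// strip line separators, and prefix the exit code.
private String runCmd(String[] cmd) throws Exception {
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream out = new PrintStream(bytes);
  final PrintStream oldErr = System.err;
  final PrintStream oldOut = System.out;
  System.setErr(out);
  System.setOut(out);
  int ret;
  try {
    ret = admin.run(cmd); // 'admin' is the test's DebugAdmin instance (assumed field)
  } finally {
    System.setErr(oldErr);
    System.setOut(oldOut);
    out.flush();
  }
  return "ret: " + ret + ", "
      + bytes.toString().replaceAll(System.lineSeparator(), "");
}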

Example 2: testInvalidateOverReplicatedBlock

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test that an over-replicated block gets invalidated when the replication
 * factor of a partial (still-being-written) block is decreased.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestOverReplicatedBlocks.java

Example 3: testWhenDecreasingReplication

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * A corrupt replica has to be removed once the number of valid replicas
 * matches the file's replication factor. Here that condition is reached
 * by reducing the replication factor.
 * The test strategy:
 *   Bring up a cluster with 3 DataNodes
 *   Create a file with replication factor 3
 *   Corrupt one replica of a block of the file
 *   Verify that there are still 2 good replicas and 1 corrupt replica
 *     (the corrupt replica is not removed, since the number of good
 *      replicas (2) is less than the replication factor (3))
 *   Set the replication factor to 2
 *   Verify that the corrupt replica is removed
 *     (the corrupt replica is removed, since the number of good
 *      replicas (2) now equals the replication factor (2))
 */
@Test
public void testWhenDecreasingReplication() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 2);

    // wait for 3 seconds so that all block reports are processed.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
    }

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());

  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 54, Source: TestProcessCorruptBlocks.java
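
Examples 3, 4, 11, and 14 call two helpers that the snippets do not define: countReplicas and corruptBlock. A minimal sketch of both follows. countReplicas falls out of Example 2's direct use of BlockManager.countNodes, while this corruptBlock is an assumption built from MiniDFSCluster.corruptReplica plus a DataNode restart, the same approach Example 15 uses inline.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import static org.junit.Assert.assertTrue;

// Count live/corrupt replicas of a block, mirroring Example 2's direct
// call to BlockManager#countNodes.
private static NumberReplicas countReplicas(FSNamesystem namesystem,
    ExtendedBlock block) {
  return namesystem.getBlockManager().countNodes(block.getLocalBlock());
}

// Sketch of corrupting the replica held by the datanode at dnIndex: damage
// the on-disk replica, then restart that datanode so the corrupt replica
// is rescanned and reported to the namenode (cf. Example 15). The fs and
// fileName parameters are unused here but kept to match the call sites.
private static void corruptBlock(MiniDFSCluster cluster, FileSystem fs,
    Path fileName, int dnIndex, ExtendedBlock block) throws IOException {
  assertTrue(cluster.corruptReplica(dnIndex, block));
  cluster.restartDataNode(dnIndex);
}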

Example 4: testByAddingAnExtraDataNode

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * A corrupt replica has to be removed once the number of valid replicas
 * matches the file's replication factor. In this test, that condition is
 * reached by increasing the number of good replicas via replication to a
 * new DataNode.
 * The test strategy:
 *   Bring up a cluster with 3 DataNodes
 *   Create a file with replication factor 3
 *   Corrupt one replica of a block of the file
 *   Verify that there are still 2 good replicas and 1 corrupt replica
 *     (the corrupt replica is not removed, since the number of good
 *      replicas (2) is less than the replication factor (3))
 *   Start a new DataNode
 *   Verify that a new replica is created and the corrupt replica is
 *   removed.
 */
@Test
public void testByAddingAnExtraDataNode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    cluster.restartDataNode(dnPropsFourth);

    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    assertEquals(3, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 51, Source: TestProcessCorruptBlocks.java

Example 5: testSetrepIncWithUnderReplicatedBlocks

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test(timeout=60000) // 1 min timeout
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final short REPLICATION_FACTOR = 2;
  final String FILE_NAME = "/testFile";
  final Path FILE_PATH = new Path(FILE_NAME);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR + 1).build();
  try {
    // create a file with one block with a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
    
    // remove one replica from the blocksMap so block becomes under-replicated
    // but the block does not get put into the under-replicated blocks queue
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
    DatanodeDescriptor dn = bm.blocksMap.getStorages(b.getLocalBlock())
        .iterator().next().getDatanodeDescriptor();
    bm.addToInvalidates(b.getLocalBlock(), dn);
    Thread.sleep(5000);
    bm.blocksMap.removeNode(b.getLocalBlock(), dn);
    
    // increment this file's replication factor
    FsShell shell = new FsShell(conf);
    assertEquals(0, shell.run(new String[]{
        "-setrep", "-w", Integer.toString(1+REPLICATION_FACTOR), FILE_NAME}));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: TestUnderReplicatedBlocks.java

Example 6: testSufficientlyReplBlocksUsesNewRack

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testSufficientlyReplBlocksUsesNewRack() throws Exception {
  Configuration conf = getConf();
  final short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  // All datanodes are on the same rack
  String racks[] = {"/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();

  try {
    // Create a file with one block with a replication factor of 3
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);

    // Add a new datanode on a different rack
    String newRacks[] = {"/rack2"};
    cluster.startDataNodes(conf, 1, true, null, newRacks);
    cluster.waitActive();

    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: TestBlocksWithNotEnoughRacks.java

Example 7: testUnderReplicatedUsesNewRacks

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testUnderReplicatedUsesNewRacks() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  // All datanodes are on the same rack
  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
    
    // Add new datanodes on a different rack and increase the
    // replication factor so the block is underreplicated and make
    // sure at least one of the hosts on the new rack is used. 
    String newRacks[] = {"/rack2", "/rack2"};
    cluster.startDataNodes(conf, 2, true, null, newRacks);
    REPLICATION_FACTOR = 5;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);

    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestBlocksWithNotEnoughRacks.java

Example 8: testReduceReplFactorRespectsRackPolicy

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testReduceReplFactorRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decrease the replication factor and make sure the deleted replica
    // was not the one that lived on the rack with only one replica,
    // i.e. we should still have replicas on 2 racks after reducing the
    // replication factor.
    REPLICATION_FACTOR = 2;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR); 

    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestBlocksWithNotEnoughRacks.java

Example 9: testChangedStorageId

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testChangedStorageId() throws IOException, URISyntaxException,
    InterruptedException {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .build();
  
  try {
    cluster.transitionToActive(0);
    
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    OutputStream out = fs.create(filePath);
    out.write("foo bar baz".getBytes());
    out.close();
    
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    
    // Change the gen stamp of the block on datanode to go back in time (gen
    // stamps start at 1000)
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
    assertTrue(cluster.changeGenStampOfBlock(0, block, 900));
    
    // Stop the DN so the replica with the changed gen stamp will be reported
    // when this DN starts up.
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    
    // Restart the namenode so that when the DN comes up it will see an initial
    // block report.
    cluster.restartNameNode(1, false);
    assertTrue(cluster.restartDataNode(dnProps, true));
    
    // Wait until the standby NN queues up the corrupt block in the pending DN
    // message queue.
    while (cluster.getNamesystem(1).getBlockManager()
        .getPendingDataNodeMessageCount() < 1) {
      ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
    }
    
    assertEquals(1, cluster.getNamesystem(1).getBlockManager()
        .getPendingDataNodeMessageCount());
    String oldStorageId = getRegisteredDatanodeUid(cluster, 1);
    
    // Reformat/restart the DN.
    assertTrue(wipeAndRestartDn(cluster, 0));
    
    // Give the DN time to start up and register, which will cause the
    // DatanodeManager to dissociate the old storage ID from the DN xfer addr.
    String newStorageId = "";
    do {
      ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
      newStorageId = getRegisteredDatanodeUid(cluster, 1);
      System.out.println("====> oldStorageId: " + oldStorageId +
          " newStorageId: " + newStorageId);
    } while (newStorageId.equals(oldStorageId));
    
    assertEquals(0, cluster.getNamesystem(1).getBlockManager()
        .getPendingDataNodeMessageCount());
    
    // Now try to fail over.
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 70, Source: TestPendingCorruptDnMessages.java

Example 10: testBlockIdCKCorruption

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test for blockIdCK with block corruption
 */
@Test
public void testBlockIdCKCorruption() throws Exception {
  short NUM_DN = 1;
  final long blockSize = 512;
  Random random = new Random();
  ExtendedBlock block;
  short repFactor = 1;
  String [] racks = {"/rack1"};
  String [] hosts = {"host1"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  try {
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
            .racks(racks).build();

    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);

    DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
    //create files
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, repFactor, 1000L);
    util.waitReplication(dfs, path, repFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
      sb.append(lb.getBlock().getLocalBlock().getBlockName()).append(' ');
    }
    String[] bIds = sb.toString().split(" ");

    //make sure block is healthy before we corrupt it
    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));

    // corrupt replicas
    block = DFSTestUtil.getFirstBlock(dfs, path);
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
      FileChannel channel = raFile.getChannel();
      String badString = "BADBAD";
      int rand = random.nextInt((int) channel.size()/2);
      raFile.seek(rand);
      raFile.write(badString.getBytes());
      raFile.close();
    }

    util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);

    outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 77, Source: TestFsck.java
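
Example 10 also depends on runFsck, a TestFsck helper not shown here. Below is a plausible sketch, assuming the standard DFSck entry point behind the hadoop fsck command: run the tool with captured output, optionally assert on the exit code, and return the printed report.

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;
import static org.junit.Assert.assertEquals;

// Sketch of TestFsck#runFsck (assumed): run fsck with the given arguments,
// capture what it prints, and optionally verify the exit code.
static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  return bStream.toString();
}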

Example 11: testWithReplicationFactorAsOne

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * A corrupt replica has to be removed once the number of valid replicas
 * matches the file's replication factor. That condition should hold as
 * long as there is at least one good replica; this test verifies it.
 *
 * The test strategy:
 *   Bring up a cluster with 2 DataNodes
 *   Create a file with replication factor 2
 *   Corrupt one replica of a block of the file
 *   Verify that there is 1 good replica and 1 corrupt replica
 *     (the corrupt replica is not removed, since the number of good
 *      replicas (1) is less than the replication factor (2))
 *   Set the replication factor to 1
 *   Verify that the corrupt replica is removed
 *     (the corrupt replica is removed, since the number of good
 *      replicas (1) now equals the replication factor (1))
 */
@Test(timeout=20000)
public void testWithReplicationFactorAsOne() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 2, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 1);

    assertEquals(1, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 1);

    // poll for up to 10 seconds until the corrupt replica count drops to 0.
    for (int i = 0; i < 10; i++) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {
      }
      if (countReplicas(namesystem, block).corruptReplicas() == 0) {
        break;
      }
    }

    assertEquals(1, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());

  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 60, Source: TestProcessCorruptBlocks.java

Example 12: testHostsExcludeInUI

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause 
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    String names = name + "\n" + "localhost:42\n";
    LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node",
        nodes.contains("Decommissioned"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 57, Source: TestHostsFiles.java

Example 13: testNumberOfBlocksToBeReplicated

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Verifies that the number of outstanding replication requests for a
 * given DN doesn't exceed the limit set by the configuration property
 * dfs.namenode.replication.max-streams-hard-limit.
 * The test does the following:
 * 1. Create a mini cluster with 2 DNs. Set a large heartbeat interval so
 *    that replication requests won't be picked up by any DN right away.
 * 2. Create a file with 10 blocks and replication factor 2, so each of
 *    the 2 DNs holds one replica of every block.
 * 3. Add a DN to the cluster for later replication.
 * 4. Remove a DN that has data.
 * 5. Ask the BlockManager to compute the replication work. This assigns
 *    replication requests to the only remaining DN that has data.
 * 6. Make sure the number of pending replication requests of that DN
 *    doesn't exceed the limit.
 * @throws Exception
 */
@Test(timeout=60000) // 1 min timeout
public void testNumberOfBlocksToBeReplicated() throws Exception {
  Configuration conf = new HdfsConfiguration();

  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);

  // Large value to make sure the pending replication request can stay in
  // DatanodeDescriptor.replicateBlocks before test timeout.
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 100);

  // Make sure BlockManager can pull all blocks from UnderReplicatedBlocks via
  // chooseUnderReplicatedBlocks at once.
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, 5);

  int NUM_OF_BLOCKS = 10;
  final short REP_FACTOR = 2;
  final String FILE_NAME = "/testFile";
  final Path FILE_PATH = new Path(FILE_NAME);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
          REP_FACTOR).build();
  try {
    // create a file with 10 blocks with a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, FILE_PATH, NUM_OF_BLOCKS, REP_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REP_FACTOR);

    cluster.startDataNodes(conf, 1, true, null, null, null, null);

    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
    Iterator<DatanodeStorageInfo> storageInfos =
        bm.blocksMap.getStorages(b.getLocalBlock())
        .iterator();
    DatanodeDescriptor firstDn = storageInfos.next().getDatanodeDescriptor();
    DatanodeDescriptor secondDn = storageInfos.next().getDatanodeDescriptor();

    bm.getDatanodeManager().removeDatanode(firstDn);

    assertEquals(NUM_OF_BLOCKS, bm.getUnderReplicatedNotMissingBlocks());
    bm.computeDatanodeWork();

    assertTrue("The number of blocks to be replicated should be less than "
        + "or equal to " + bm.replicationStreamsHardLimit,
        secondDn.getNumberOfBlocksToBeReplicated()
        <= bm.replicationStreamsHardLimit);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 72, Source: TestUnderReplicatedBlocks.java

Example 14: testBlockInvalidationWhenRBWReplicaMissedInDN

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test that when a block's replica is removed from the RBW folder on one
 * of the datanodes, the namenode asks to invalidate that corrupt replica
 * and schedules replication of one more replica for the now
 * under-replicated block.
 */
@Test(timeout=600000)
public void testBlockInvalidationWhenRBWReplicaMissedInDN()
    throws IOException, InterruptedException {
  // This test cannot pass on Windows due to file locking enforcement.  It will
  // reject the attempt to delete the block file from the RBW folder.
  assumeTrue(!Path.WINDOWS);

  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
      .build();
  FSDataOutputStream out = null;
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
    out = fs.create(testPath, (short) 2);
    out.writeBytes("HDFS-3157: " + testPath);
    out.hsync();
    cluster.startDataNodes(conf, 1, true, null, null, null);
    String bpid = namesystem.getBlockPoolId();
    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
    Block block = blk.getLocalBlock();
    DataNode dn = cluster.getDataNodes().get(0);

    // Delete partial block and its meta information from the RBW folder
    // of first datanode.
    File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block);
    File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block);
    assertTrue("Could not delete the block file from the RBW folder",
        blockFile.delete());
    assertTrue("Could not delete the block meta file from the RBW folder",
        metaFile.delete());

    out.close();
    
    int liveReplicas = 0;
    while (true) {
      if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) < 2) {
        // This confirms we have a corrupt replica
        LOG.info("Live Replicas after corruption: " + liveReplicas);
        break;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be less than 2 replicas in the "
        + "liveReplicasMap", 1, liveReplicas);
    
    while (true) {
      if ((liveReplicas =
            countReplicas(namesystem, blk).liveReplicas()) > 1) {
        //Wait till the live replica count becomes equal to Replication Factor
        LOG.info("Live Replicas after Rereplication: " + liveReplicas);
        break;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be two live replicas", 2, liveReplicas);

    while (true) {
      Thread.sleep(100);
      if (countReplicas(namesystem, blk).corruptReplicas() == 0) {
        LOG.info("Corrupt Replicas becomes 0");
        break;
      }
    }
  } finally {
    if (out != null) {
      out.close();
    }
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 82, Source: TestRBWBlockInvalidation.java

Example 15: testCorruptBlockRereplicatedAcrossRacks

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  int fileLen = 512;
  final Path filePath = new Path("/testFile");
  // Datanodes are spread across two racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block with a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    
    DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L);
    final String fileContent = DFSTestUtil.readFile(fs, filePath);

    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Corrupt a replica of the block
    int dnToCorrupt = DFSTestUtil.firstDnWithBlock(cluster, b);
    assertTrue(cluster.corruptReplica(dnToCorrupt, b));

    // Restart the datanode so blocks are re-scanned, and the corrupt
    // block is detected.
    cluster.restartDataNode(dnToCorrupt);

    // Wait for the namenode to notice the corrupt replica
    DFSTestUtil.waitCorruptReplicas(fs, ns, filePath, b, 1);

    // The rack policy is still respected
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Ensure all replicas are valid (the corrupt replica may not
    // have been cleaned up yet).
    for (int i = 0; i < racks.length; i++) {
      String blockContent = cluster.readBlockOnDataNode(i, b);
      if (blockContent != null && i != dnToCorrupt) {
        assertEquals("Corrupt replica", fileContent, blockContent);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: TestBlocksWithNotEnoughRacks.java


Note: The org.apache.hadoop.hdfs.DFSTestUtil.getFirstBlock examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors; consult the corresponding project's License before distributing or using it, and do not reproduce this article without permission.