

Java DFSTestUtil.getAllBlocks Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.getAllBlocks. If you are wondering what DFSTestUtil.getAllBlocks does, how to call it, or want working examples, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


Four code examples of the DFSTestUtil.getAllBlocks method are shown below, ordered by popularity.
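Before the full examples, here is a minimal sketch of the typical call pattern, assuming a MiniDFSCluster test environment. The class name, file path, file size, and seed are illustrative placeholders; the DFSTestUtil.createFile and DFSTestUtil.getAllBlocks calls are the test-utility APIs the examples below use.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class GetAllBlocksSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Write a small file, then fetch every LocatedBlock that backs it.
      Path path = new Path("/getAllBlocksDemo.dat");
      DFSTestUtil.createFile(fs, path, 4096L, (short) 1, 0xCAFEL);

      List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
      for (LocatedBlock lb : blocks) {
        System.out.println(lb.getBlock().getBlockName());
      }
    } finally {
      cluster.shutdown();
    }
  }
}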

Example 1: testBlockIdGeneration

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test that block IDs are generated sequentially.
 *
 * @throws IOException
 */
@Test
public void testBlockIdGeneration() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    // Create a file that is 10 blocks long.
    Path path = new Path("testBlockIdGeneration.dat");
    DFSTestUtil.createFile(
        fs, path, IO_SIZE, BLOCK_SIZE * 10, BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
    LOG.info("Block0 id is " + blocks.get(0).getBlock().getBlockId());
    long nextBlockExpectedId = blocks.get(0).getBlock().getBlockId() + 1;

    // Ensure that the block IDs are sequentially increasing.
    for (int i = 1; i < blocks.size(); ++i) {
      long nextBlockId = blocks.get(i).getBlock().getBlockId();
      LOG.info("Block" + i + " id is " + nextBlockId);
      assertThat(nextBlockId, is(nextBlockExpectedId));
      ++nextBlockExpectedId;
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TestSequentialBlockId.java

Example 2: testBlockIdCK

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test for blockIdCK.
 */
@Test
public void testBlockIdCK() throws Exception {

  final short REPL_FACTOR = 2;
  short NUM_DN = 2;
  final long blockSize = 512;

  String [] racks = {"/rack1", "/rack2"};
  String [] hosts = {"host1", "host2"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
      .racks(racks).build();

  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);

  DFSTestUtil util = new DFSTestUtil.Builder().
    setName(getClass().getSimpleName()).setNumFiles(1).build();
  // create a test file
  final String pathString = "/testfile";
  final Path path = new Path(pathString);
  util.createFile(dfs, path, 1024, REPL_FACTOR, 1000L);
  util.waitReplication(dfs, path, REPL_FACTOR);
  StringBuilder sb = new StringBuilder();
  for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
    sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
  }
  String[] bIds = sb.toString().split(" ");

  // run fsck
  try {
    // illegal input test
    String runFsckResult = runFsck(conf, 0, true, "/", "-blockId",
        "not_a_block_id");
    assertTrue(runFsckResult.contains("Incorrect blockId format:"));

    // general test
    runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
    assertTrue(runFsckResult.contains(bIds[0]));
    assertTrue(runFsckResult.contains(bIds[1]));
    assertTrue(runFsckResult.contains(
        "Block replica on datanode/rack: host1/rack1 is HEALTHY"));
    assertTrue(runFsckResult.contains(
        "Block replica on datanode/rack: host2/rack2 is HEALTHY"));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 62, Source: TestFsck.java
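Outside of the test harness, the same check that the runFsck helper exercises is available from the command line as hdfs fsck <path> -blockId <block name>, e.g. hdfs fsck / -blockId blk_1073741825 (the block ID here is illustrative); fsck then reports the health of each replica of that block, as the assertions above verify.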

Example 3: testBlockIdCKCorruption

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test for blockIdCK with block corruption
 */
@Test
public void testBlockIdCKCorruption() throws Exception {
  short NUM_DN = 1;
  final long blockSize = 512;
  Random random = new Random();
  DFSClient dfsClient;
  LocatedBlocks blocks;
  ExtendedBlock block;
  short repFactor = 1;
  String [] racks = {"/rack1"};
  String [] hosts = {"host1"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  try {
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
            .racks(racks).build();

    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);

    DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
    // create a test file
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, repFactor, 1000L);
    util.waitReplication(dfs, path, repFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
      sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
    }
    String[] bIds = sb.toString().split(" ");

    // make sure the block is healthy before we corrupt it
    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));

    // corrupt the block replica by overwriting part of the block file
    block = DFSTestUtil.getFirstBlock(dfs, path);
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
      FileChannel channel = raFile.getChannel();
      String badString = "BADBAD";
      int rand = random.nextInt((int) channel.size()/2);
      raFile.seek(rand);
      raFile.write(badString.getBytes());
      raFile.close();
    }

    util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);

    outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 77, Source: TestFsck.java

Example 4: testTriggerBlockIdCollision

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
 * Test that collisions in the block ID space are handled gracefully.
 *
 * @throws IOException
 */
@Test
public void testTriggerBlockIdCollision() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    FSNamesystem fsn = cluster.getNamesystem();
    final int blockCount = 10;

    // Create a file with a few blocks to rev up the global block ID
    // counter.
    Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
    DFSTestUtil.createFile(
        fs, path1, IO_SIZE, BLOCK_SIZE * blockCount,
        BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks1 = DFSTestUtil.getAllBlocks(fs, path1);

    // Rewind the block ID counter in the name system object. This will result
    // in block ID collisions when we try to allocate new blocks.
    SequentialBlockIdGenerator blockIdGenerator = fsn.getBlockIdManager()
      .getBlockIdGenerator();
    blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);

    // Trigger collisions by creating a new file.
    Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
    DFSTestUtil.createFile(
        fs, path2, IO_SIZE, BLOCK_SIZE * blockCount,
        BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);
    assertThat(blocks2.size(), is(blockCount));

    // Make sure that file2's block IDs start immediately after file1's.
    assertThat(blocks2.get(0).getBlock().getBlockId(),
               is(blocks1.get(blockCount - 1).getBlock().getBlockId() + 1));

  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 51, Source: TestSequentialBlockId.java
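The collision handling this test relies on can be pictured with a small standalone sketch. This is an illustrative simplification, not Hadoop's actual SequentialBlockIdGenerator source; the point is only that a sequential allocator can step over IDs that are already bound to blocks.

import java.util.HashSet;
import java.util.Set;

// Illustrative only: a sequential ID allocator that skips IDs already in
// use, mirroring the behavior the test asserts after the counter is rewound.
class CollisionSkippingIdGenerator {
  private long current;
  private final Set<Long> usedIds = new HashSet<>();

  CollisionSkippingIdGenerator(long start) {
    this.current = start;
  }

  void markUsed(long id) {
    usedIds.add(id);
  }

  // Analogous to blockIdGenerator.setCurrentValue(...) in the test above.
  void setCurrentValue(long value) {
    this.current = value;
  }

  long nextValue() {
    do {
      ++current;
    } while (usedIds.contains(current)); // step over would-be collisions
    usedIds.add(current);
    return current;
  }
}

With IDs 1 through 10 marked as used and the counter rewound to 5, nextValue() walks past the collisions and returns 11, matching the test's assertion that file2's first block ID immediately follows file1's last.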


Note: The org.apache.hadoop.hdfs.DFSTestUtil.getAllBlocks examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors; consult the corresponding project's license before distributing or reusing the code. Do not reproduce without permission.