

Java DataNodeTestUtils.getBlockFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.getBlockFile. If you have been wondering what DataNodeTestUtils.getBlockFile does, how to call it, or where to find it in use, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.


Five code examples of the DataNodeTestUtils.getBlockFile method are shown below, sorted by popularity by default.
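
For orientation, here is a minimal sketch of the assumed signatures (inferred from the call sites below; treat them as assumptions rather than authoritative API documentation). getBlockFile resolves the on-disk file that backs a replica on a given datanode; the companion getMetaFile, used in Examples 3-5, resolves the replica's checksum metadata file.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

// Assumed signatures (sketch, not authoritative):
//   static File getBlockFile(DataNode dn, String bpid, Block b) throws IOException
//   static File getMetaFile(DataNode dn, String bpid, Block b) throws IOException
static File locateReplicaFile(DataNode dn, ExtendedBlock blk) throws IOException {
  // Unpack the block pool id and the pool-local Block from the ExtendedBlock,
  // exactly as the examples below do.
  return DataNodeTestUtils.getBlockFile(
      dn, blk.getBlockPoolId(), blk.getLocalBlock());
}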

Example 1: corruptBlock

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the class that provides this method
/**
 * Corrupt a block on a data node by overwriting the block file's content
 * with the byte sequence 0, 1, ..., BLOCK_SIZE-1.
 *
 * @param block
 *          the ExtendedBlock to be corrupted
 * @param dn
 *          the data node where the block needs to be corrupted
 * @throws FileNotFoundException
 * @throws IOException
 */
private static void corruptBlock(final ExtendedBlock block, final DataNode dn)
    throws FileNotFoundException, IOException {
  final File f = DataNodeTestUtils.getBlockFile(
      dn, block.getBlockPoolId(), block.getLocalBlock());
  // Overwrite the replica file with a deterministic byte pattern so that
  // checksum verification fails on the next read of this block.
  try (RandomAccessFile raFile = new RandomAccessFile(f, "rw")) {
    final byte[] bytes = new byte[(int) BLOCK_SIZE];
    for (int i = 0; i < BLOCK_SIZE; i++) {
      bytes[i] = (byte) i;
    }
    raFile.write(bytes);
  }
}
 
Developer: naver | Project: hadoop | Lines: 24 | Source: TestClientReportBadBlock.java
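
As a follow-up, here is a hedged sketch of how corruptBlock is typically driven in a test (the path, replication factor, and read-back step are illustrative assumptions, not code taken from TestClientReportBadBlock): write a one-block file, corrupt its first block on a datanode, then read the file back so the client hits the bad checksum and reports the block to the namenode.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

// Illustrative sketch only; assumes a running MiniDFSCluster `cluster` and the
// BLOCK_SIZE constant from Example 1. The file path is hypothetical.
private void corruptAndReadBack(MiniDFSCluster cluster, FileSystem fs)
    throws Exception {
  Path file = new Path("/sketch/file1");
  DFSTestUtil.createFile(fs, file, BLOCK_SIZE, (short) 1, 0L);
  ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, file);
  corruptBlock(blk, cluster.getDataNodes().get(0));
  try {
    DFSTestUtil.readFile(fs, file);  // expected to fail on the corrupted replica
  } catch (IOException expected) {
    // The checksum failure surfaces here; the client then reports the bad
    // block to the namenode.
  }
}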

Example 2: corruptBlock

This example, from the hops project, is identical to Example 1 apart from line wrapping, so the code is not repeated here.

Developer: hopshadoop | Project: hops | Lines: 25 | Source: TestClientReportBadBlock.java

Example 3: testBlockInvalidationWhenRBWReplicaMissedInDN

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the class that provides this method
/**
 * Test that when a block's replica is removed from the RBW folder on one of
 * the datanodes, the namenode asks to invalidate the corrupted block and
 * schedules replication of one more replica for the under-replicated block.
 */
@Test(timeout=600000)
public void testBlockInvalidationWhenRBWReplicaMissedInDN()
    throws IOException, InterruptedException {
  // This test cannot pass on Windows due to file locking enforcement: the OS
  // rejects the attempt to delete the block file from the RBW folder.
  assumeTrue(!Path.WINDOWS);

  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
      .build();
  FSDataOutputStream out = null;
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
    out = fs.create(testPath, (short) 2);
    out.writeBytes("HDFS-3157: " + testPath);
    out.hsync();
    cluster.startDataNodes(conf, 1, true, null, null, null);
    String bpid = namesystem.getBlockPoolId();
    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
    Block block = blk.getLocalBlock();
    DataNode dn = cluster.getDataNodes().get(0);

    // Delete partial block and its meta information from the RBW folder
    // of first datanode.
    File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block);
    File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block);
    assertTrue("Could not delete the block file from the RBW folder",
        blockFile.delete());
    assertTrue("Could not delete the block meta file from the RBW folder",
        metaFile.delete());

    out.close();
    
    int liveReplicas = 0;
    while (true) {
      if ((liveReplicas = countReplicas(namesystem, blk).liveReplicas()) < 2) {
        // This confirms we have a corrupt replica
        LOG.info("Live Replicas after corruption: " + liveReplicas);
        break;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be less than 2 replicas in the "
        + "liveReplicasMap", 1, liveReplicas);
    
    while (true) {
      if ((liveReplicas =
            countReplicas(namesystem, blk).liveReplicas()) > 1) {
        // Wait until the live replica count reaches the replication factor.
        LOG.info("Live Replicas after Rereplication: " + liveReplicas);
        break;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be two live replicas", 2, liveReplicas);

    while (true) {
      Thread.sleep(100);
      if (countReplicas(namesystem, blk).corruptReplicas() == 0) {
        LOG.info("Corrupt Replicas becomes 0");
        break;
      }
    }
  } finally {
    if (out != null) {
      out.close();
    }
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 82 | Source: TestRBWBlockInvalidation.java
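
The countReplicas helper polled in the loops above (and in Examples 4 and 5) is not shown on this page. A plausible minimal implementation, assuming the standard FSNamesystem.getBlockManager() and BlockManager.countNodes(Block) APIs (verify against the Hadoop version in use):

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

// Sketch of the helper referenced above: it delegates to the BlockManager,
// whose NumberReplicas result carries the live and corrupt replica counts
// that the polling loops inspect.
private static NumberReplicas countReplicas(final FSNamesystem namesystem,
    ExtendedBlock block) {
  return namesystem.getBlockManager().countNodes(block.getLocalBlock());
}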

Example 4: testBlockInvalidationWhenRBWReplicaMissedInDN

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the class that provides this method
/**
 * Test that when a block's replica is removed from the RBW folder on one of
 * the datanodes, the namenode asks to invalidate the corrupted block and
 * schedules replication of one more replica for the under-replicated block.
 */
@Test(timeout=60000)
public void testBlockInvalidationWhenRBWReplicaMissedInDN()
    throws IOException, InterruptedException {
  // This test cannot pass on Windows due to file locking enforcement: the OS
  // rejects the attempt to delete the block file from the RBW folder.
  assumeTrue(!Path.WINDOWS);

  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
      .build();
  FSDataOutputStream out = null;
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
    out = fs.create(testPath, (short) 2);
    out.writeBytes("HDFS-3157: " + testPath);
    out.hsync();
    cluster.startDataNodes(conf, 1, true, null, null, null);
    String bpid = namesystem.getBlockPoolId();
    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
    Block block = blk.getLocalBlock();
    DataNode dn = cluster.getDataNodes().get(0);

    // Delete partial block and its meta information from the RBW folder
    // of first datanode.
    File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block);
    File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block);
    assertTrue("Could not delete the block file from the RBW folder",
        blockFile.delete());
    assertTrue("Could not delete the block meta file from the RBW folder",
        metaFile.delete());

    out.close();

    // Check that the datanode has reported the corrupt block.
    boolean isCorruptReported = false;
    while (!isCorruptReported) {
      if (countReplicas(namesystem, blk).corruptReplicas() > 0) {
        isCorruptReported = true;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be 1 replica in the corruptReplicasMap", 1,
        countReplicas(namesystem, blk).corruptReplicas());

    // Check that the block has been replicated to another datanode.
    blk = DFSTestUtil.getFirstBlock(fs, testPath);
    boolean isReplicated = false;
    while (!isReplicated) {
      if (countReplicas(namesystem, blk).liveReplicas() > 1) {
        isReplicated = true;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be two live replicas", 2, countReplicas(
        namesystem, blk).liveReplicas());

    // Sleep for 1 second so that, by then, the datanode has reported the
    // corrupt block again after a live replica of the block has been replicated.
    Thread.sleep(1000);

    // Check that there is no corrupt block in the corruptReplicasMap.
    assertEquals("There should not be any replica in the corruptReplicasMap",
        0, countReplicas(namesystem, blk).corruptReplicas());
  } finally {
    if (out != null) {
      out.close();
    }
    cluster.shutdown();
  }
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 82 | Source: TestRBWBlockInvalidation.java

Example 5: testBlockInvalidationWhenRBWReplicaMissedInDN

import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; // import the class that provides this method
/**
 * Test that when a block's replica is removed from the RBW folder on one of
 * the datanodes, the namenode asks to invalidate the corrupted block and
 * schedules replication of one more replica for the under-replicated block.
 */
@Test(timeout = 300000)
public void testBlockInvalidationWhenRBWReplicaMissedInDN()
    throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 300);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FSDataOutputStream out = null;
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
    out = fs.create(testPath, (short) 2);
    out.writeBytes("HDFS-3157: " + testPath);
    out.hsync();
    cluster.startDataNodes(conf, 1, true, null, null, null);
    String bpid = namesystem.getBlockPoolId();
    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testPath);
    Block block = blk.getLocalBlock();
    DataNode dn = cluster.getDataNodes().get(0);

    // Delete partial block and its meta information from the RBW folder
    // of first datanode.
    File blockFile = DataNodeTestUtils.getBlockFile(dn, bpid, block);
    File metaFile = DataNodeTestUtils.getMetaFile(dn, bpid, block);
    assertTrue("Could not delete the block file from the RBW folder",
        blockFile.delete());
    assertTrue("Could not delete the block meta file from the RBW folder",
        metaFile.delete());

    out.close();

    // Check that the datanode has reported the corrupt block.
    boolean isCorruptReported = false;
    while (!isCorruptReported) {
      if (countReplicas(namesystem, blk).corruptReplicas() > 0) {
        isCorruptReported = true;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be 1 replica in the corruptReplicasMap", 1,
        countReplicas(namesystem, blk).corruptReplicas());

    // Check that the block has been replicated to another datanode.
    blk = DFSTestUtil.getFirstBlock(fs, testPath);
    boolean isReplicated = false;
    while (!isReplicated) {
      if (countReplicas(namesystem, blk).liveReplicas() > 1) {
        isReplicated = true;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be two live replicas", 2,
        countReplicas(namesystem, blk).liveReplicas());

    // Sleep for 1 second so that, by then, the datanode has reported the
    // corrupt block again after a live replica of the block has been replicated.
    Thread.sleep(1000);

    // Check that there is no corrupt block in the corruptReplicasMap.
    assertEquals("There should not be any replica in the corruptReplicasMap",
        0, countReplicas(namesystem, blk).corruptReplicas());
  } finally {
    if (out != null) {
      out.close();
    }
    cluster.shutdown();
  }
}
 
Developer: hopshadoop | Project: hops | Lines: 78 | Source: TestRBWBlockInvalidation.java


Note: The org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils.getBlockFile method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.