

Java TestDatanodeBlockScanner.corruptReplica Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.TestDatanodeBlockScanner.corruptReplica. If you are unsure what TestDatanodeBlockScanner.corruptReplica does or how to call it, the curated code examples below may help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.hdfs.TestDatanodeBlockScanner.


Seven code examples of TestDatanodeBlockScanner.corruptReplica are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
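For orientation, the sketch below distills the call pattern the examples share. Note that the signature of corruptReplica varies across Hadoop forks: Examples 1 and 2 pass a Block plus the MiniDFSCluster, Examples 3 and 4 pass the block name as a String, and Example 6 (an older fork) omits the cluster argument entirely. The helper name corruptAllReplicas and the numDataNodes parameter below are illustrative, not taken from any of the projects.

import java.io.IOException;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
import org.apache.hadoop.hdfs.protocol.Block;

// Hypothetical helper mirroring Examples 1-4: try every datanode and
// report whether at least one on-disk replica was actually corrupted.
static boolean corruptAllReplicas(Block block, MiniDFSCluster cluster,
    int numDataNodes) throws IOException {
  boolean corrupted = false;
  for (int i = 0; i < numDataNodes; i++) {
    // true if datanode i held a replica of the block and its
    // replica data was modified on disk
    corrupted |= TestDatanodeBlockScanner.corruptReplica(block, i, cluster);
  }
  return corrupted;
}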

Example 1: corruptBlock

import org.apache.hadoop.hdfs.TestDatanodeBlockScanner; // import the class that provides the method
static void corruptBlock(Block block, MiniDFSCluster dfs) throws IOException {
  boolean corrupted = false;
  for (int i = 0; i < NUM_DATANODES; i++) {
    corrupted |= TestDatanodeBlockScanner.corruptReplica(block, i, dfs);
  }
  assertTrue("could not corrupt block", corrupted);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 8, Source file: TestRaidHar.java

Example 2: corruptBlock

import org.apache.hadoop.hdfs.TestDatanodeBlockScanner; // import the class that provides the method
static void corruptBlock(Block block, MiniDFSCluster dfs) 
    throws IOException {
  boolean corrupted = false;
  for (int i = 0; i < NUM_DATANODES; i++) {
    corrupted |= TestDatanodeBlockScanner.corruptReplica(block, i, dfs);
  }
  assertTrue("could not corrupt block: " + block, corrupted);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 9, Source file: TestReadConstruction.java

Example 3: corruptBlock

import org.apache.hadoop.hdfs.TestDatanodeBlockScanner; // import the class that provides the method
static void corruptBlock(String blockName, MiniDFSCluster dfs) throws IOException {
  boolean corrupted = false;
  for (int i = 0; i < NUM_DATANODES; i++) {
    corrupted |= TestDatanodeBlockScanner.corruptReplica(blockName, i, dfs);
  }
  assertTrue("could not corrupt block", corrupted);
}
 
Developer ID: iVCE, Project: RDFS, Lines of code: 8, Source file: TestFileCorruptions.java
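A helper like the ones above is typically driven from a test body that first creates and fully replicates a file, then locates its first block. The sketch below assembles that flow from calls that appear in Examples 5-7 (DFSTestUtil.createFile, DFSTestUtil.getFirstBlock, DFSTestUtil.waitReplication) together with the Block-based corruptBlock helper from Examples 1 and 2; the NUM_DATANODES constant and the test method name are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;

static final int NUM_DATANODES = 3;  // assumed; the examples below use 3-node clusters

public void testCorruptionIsDetected() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  FileSystem fs = cluster.getFileSystem();
  try {
    // create a small file and wait until it is fully replicated
    Path file = new Path("/testCorruption");
    DFSTestUtil.createFile(fs, file, 2, (short) NUM_DATANODES, 0L);
    DFSTestUtil.waitReplication(fs, file, (short) NUM_DATANODES);

    // corrupt every replica of the first block, as in Examples 1 and 2
    Block block = DFSTestUtil.getFirstBlock(fs, file);
    corruptBlock(block, cluster);
  } finally {
    cluster.shutdown();
  }
}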

Example 4: corruptBlock

import org.apache.hadoop.hdfs.TestDatanodeBlockScanner; // import the class that provides the method
static void corruptBlock(String blockName, MiniDFSCluster dfs) 
    throws IOException {
  boolean corrupted = false;
  for (int i = 0; i < NUM_DATANODES; i++) {
    corrupted |= TestDatanodeBlockScanner.corruptReplica(blockName, i, dfs);
  }
  assertTrue("could not corrupt block: " + blockName, corrupted);
}
 
Developer ID: iVCE, Project: RDFS, Lines of code: 9, Source file: TestReadConstruction.java

Example 5: testProcesOverReplicateBlock

import org.apache.hadoop.hdfs.TestDatanodeBlockScanner; // import the class that provides the method
/** Test that processOverReplicatedBlock handles corrupt replicas correctly.
 * It makes sure that corrupt replicas are not treated as valid ones,
 * which would otherwise let the NN delete valid replicas while keeping
 * corrupt ones.
 */
public void testProcesOverReplicateBlock() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong("dfs.blockreport.intervalMsec", 1000L);
  conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
  FileSystem fs = cluster.getFileSystem();

  try {
    int namespaceId = cluster.getNameNode().getNamespaceID();
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short)3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)3);
    
    // corrupt the block on datanode 0
    Block block = DFSTestUtil.getFirstBlock(fs, fileName);
    TestDatanodeBlockScanner.corruptReplica(block, 0, cluster);
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    // remove block scanner log to trigger block scanning at startup
    // remove curr and prev
    File scanLogCurr = new File(cluster.getBlockDirectory("data1")
        .getParent(), "dncp_block_verification.log.curr");
    scanLogCurr.delete();
    File scanLogPrev = new File(cluster.getBlockDirectory("data1")
        .getParent(), "dncp_block_verification.log.prev");
    scanLogPrev.delete();
    
    // restart the datanode so the corrupt replica will be detected
    cluster.restartDataNode(dnProps);
    DFSTestUtil.waitReplication(fs, fileName, (short)2);
    
    final DatanodeID corruptDataNode = 
      cluster.getDataNodes().get(2).getDNRegistrationForNS(namespaceId);
    final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
    synchronized (namesystem.heartbeats) {
      // set live datanode's remaining space to be 0 
      // so they will be chosen to be deleted when over-replication occurs
      for (DatanodeDescriptor datanode : namesystem.heartbeats) {
        if (!corruptDataNode.equals(datanode)) {
          datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0);
        }
      }
    }
      
    // decrease the replication factor to 1; 
    namesystem.setReplication(fileName.toString(), (short)1);
    waitReplication(namesystem, block, (short)1);
    
    // the corrupt replica won't be chosen as the excess one to delete;
    // without 4910 the number of live replicas would be 0: the block gets lost
    assertEquals(1, namesystem.countNodes(block).liveReplicas());

    // Test the case where multiple calls to setReplication still succeed.
    System.out.println("Starting next test with file foo2.");
    final Path fileName2 = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName2, 2, (short)3, 0L);
    DFSTestUtil.waitReplication(fs, fileName2, (short)3);
    LocatedBlocks lbs = namesystem.getBlockLocations(
               fileName2.toString(), 0, 10);
    Block firstBlock = lbs.get(0).getBlock();
    namesystem.setReplication(fileName2.toString(), (short)2);
    namesystem.setReplication(fileName2.toString(), (short)1);
    
    // wait up to one minute for excess replicas to get deleted. It is not
    // immediate because excess replicas are handled asynchronously.
    waitReplication(namesystem, firstBlock, (short)1);
    assertEquals(1, namesystem.countNodes(firstBlock).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 76, Source file: TestOverReplicatedBlocks.java
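Examples 5 and 7 also call a test-local waitReplication(namesystem, block, expected) helper that is not reproduced on this page. Below is a minimal sketch of what such a helper plausibly looks like, polling FSNamesystem.countNodes (which the examples themselves use) until the live-replica count reaches the target; the one-second poll interval and one-minute timeout are assumptions.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

// Hypothetical reconstruction of the waitReplication helper used in
// Examples 5 and 7: poll until the block's live replica count matches.
static void waitReplication(FSNamesystem namesystem, Block block,
    short expected) throws InterruptedException {
  for (int i = 0; i < 60; i++) {  // wait up to one minute
    if (namesystem.countNodes(block).liveReplicas() == expected) {
      return;
    }
    Thread.sleep(1000);
  }
  throw new AssertionError("timed out waiting for " + expected
      + " live replicas of " + block);
}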

Example 6: testProcesOverReplicateBlock

import org.apache.hadoop.hdfs.TestDatanodeBlockScanner; // import the class that provides the method
/** Test that processOverReplicatedBlock handles corrupt replicas correctly.
 * It makes sure that corrupt replicas are not treated as valid ones,
 * which would otherwise let the NN delete valid replicas while keeping
 * corrupt ones.
 */
public void testProcesOverReplicateBlock() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong("dfs.blockreport.intervalMsec", 1000L);
  conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
  FileSystem fs = cluster.getFileSystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short)3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)3);
    
    // corrupt the block on datanode 0
    Block block = DFSTestUtil.getFirstBlock(fs, fileName);
    TestDatanodeBlockScanner.corruptReplica(block.getBlockName(), 0);
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    // remove block scanner log to trigger block scanning
    File scanLog = new File(System.getProperty("test.build.data"),
        "dfs/data/data1/current/dncp_block_verification.log.curr");
    // wait up to one minute for the deletion to succeed
    for(int i=0; !scanLog.delete(); i++) {
      assertTrue("Could not delete log file in one minute", i < 60);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }
    
    // restart the datanode so the corrupt replica will be detected
    cluster.restartDataNode(dnProps);
    DFSTestUtil.waitReplication(fs, fileName, (short)2);
    
    final DatanodeID corruptDataNode = 
      cluster.getDataNodes().get(2).dnRegistration;
    final FSNamesystem namesystem = FSNamesystem.getFSNamesystem();
    synchronized (namesystem.heartbeats) {
      // set live datanode's remaining space to be 0 
      // so they will be chosen to be deleted when over-replication occurs
      for (DatanodeDescriptor datanode : namesystem.heartbeats) {
        if (!corruptDataNode.equals(datanode)) {
          datanode.updateHeartbeat(100L, 100L, 0L, 0);
        }
      }
      
      // decrease the replication factor to 1; 
      namesystem.setReplication(fileName.toString(), (short)1);

      // the corrupt replica won't be chosen as the excess one to delete;
      // without 4910 the number of live replicas would be 0: the block gets lost
      assertEquals(1, namesystem.countNodes(block).liveReplicas());
    }
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: Seagate, Project: hadoop-on-lustre, Lines of code: 60, Source file: TestOverReplicatedBlocks.java

Example 7: testProcesOverReplicateBlock

import org.apache.hadoop.hdfs.TestDatanodeBlockScanner; // import the class that provides the method
/** Test that processOverReplicatedBlock handles corrupt replicas correctly.
 * It makes sure that corrupt replicas are not treated as valid ones,
 * which would otherwise let the NN delete valid replicas while keeping
 * corrupt ones.
 */
public void testProcesOverReplicateBlock() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong("dfs.blockreport.intervalMsec", 1000L);
  conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
  FileSystem fs = cluster.getFileSystem();

  try {
    int namespaceId = cluster.getNameNode().getNamespaceID();
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short)3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)3);
    
    // corrupt the block on datanode 0
    Block block = DFSTestUtil.getFirstBlock(fs, fileName);
    TestDatanodeBlockScanner.corruptReplica(block.getBlockName(), 0, cluster);
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    // remove block scanner log to trigger block scanning
    File scanLog = new File(cluster.getBlockDirectory("data1").getParent(), "dncp_block_verification.log.curr");
    // delete the scanner log (best effort; unlike Example 6, no retry loop)
    scanLog.delete();
    
    // restart the datanode so the corrupt replica will be detected
    cluster.restartDataNode(dnProps);
    DFSTestUtil.waitReplication(fs, fileName, (short)2);
    
    final DatanodeID corruptDataNode = 
      cluster.getDataNodes().get(2).getDNRegistrationForNS(namespaceId);
    final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
    synchronized (namesystem.heartbeats) {
      // set live datanode's remaining space to be 0 
      // so they will be chosen to be deleted when over-replication occurs
      for (DatanodeDescriptor datanode : namesystem.heartbeats) {
        if (!corruptDataNode.equals(datanode)) {
          datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0);
        }
      }
    }
      
    // decrease the replication factor to 1; 
    namesystem.setReplication(fileName.toString(), (short)1);
    waitReplication(namesystem, block, (short)1);
    
    // the corrupt replica won't be chosen as the excess one to delete;
    // without 4910 the number of live replicas would be 0: the block gets lost
    assertEquals(1, namesystem.countNodes(block).liveReplicas());

    // Test the case where multiple calls to setReplication still succeed.
    System.out.println("Starting next test with file foo2.");
    final Path fileName2 = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName2, 2, (short)3, 0L);
    DFSTestUtil.waitReplication(fs, fileName2, (short)3);
    LocatedBlocks lbs = namesystem.getBlockLocations(
               fileName2.toString(), 0, 10);
    Block firstBlock = lbs.get(0).getBlock();
    namesystem.setReplication(fileName2.toString(), (short)2);
    namesystem.setReplication(fileName2.toString(), (short)1);
    
    // wait up to one minute for excess replicas to get deleted. It is not
    // immediate because excess replicas are handled asynchronously.
    waitReplication(namesystem, firstBlock, (short)1);
    assertEquals(1, namesystem.countNodes(firstBlock).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: iVCE, Project: RDFS, Lines of code: 72, Source file: TestOverReplicatedBlocks.java


Note: The org.apache.hadoop.hdfs.TestDatanodeBlockScanner.corruptReplica method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.