

Java DFSTestUtil.getDatanodeCapacity Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.getDatanodeCapacity. If you are wondering what DFSTestUtil.getDatanodeCapacity does or how to call it, the selected code examples below should help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


Three code examples of the DFSTestUtil.getDatanodeCapacity method are shown below, sorted by popularity by default.
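For quick orientation, here is a minimal sketch of a typical call in a MiniDFSCluster-based test. The signature shown (a DatanodeManager plus a datanode index, returning the reported capacity as a long) is inferred from the examples below; the cluster setup itself is assumed to have happened elsewhere, as in Example 1.

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

// Assumes "cluster" is a running MiniDFSCluster (see initCluster in Example 1).
DatanodeManager dm =
    cluster.getNamesystem().getBlockManager().getDatanodeManager();

// Aggregate capacity reported by all live datanodes.
long liveCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);

// Capacity reported by the datanode at index 0, as seen by the NameNode.
long firstDnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);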

Example 1: initCluster

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/**
 * Initializes the cluster.
 *
 * @param numDataNodes number of datanodes
 * @param storagesPerDatanode number of storage locations on each datanode
 * @param failedVolumesTolerated number of acceptable volume failures
 * @throws Exception if there is any failure
 */
private void initCluster(int numDataNodes, int storagesPerDatanode,
    int failedVolumesTolerated) throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      failedVolumesTolerated);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .storagesPerDatanode(storagesPerDatanode).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(
      cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
  volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
 
Developer ID: naver, Project: hadoop, Lines: 31, Source: TestDataNodeVolumeFailureReporting.java

Example 2: testVolFailureStatsPreservedOnNNRestart

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/**
 * Test that the NN re-learns of volume failures after restart.
 */
@Test
public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
  // Bring up two more datanodes that can tolerate 1 failure
  cluster.startDataNodes(conf, 2, true, null, null);
  cluster.waitActive();

  final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
      ).getDatanodeManager();
  long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

  // Fail the first volume on both datanodes (we have to keep the 
  // third healthy so one node in the pipeline will not fail). 
  File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
  File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
  DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);

  Path file1 = new Path("/test1");
  DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
  DFSTestUtil.waitReplication(fs, file1, (short)2);
  ArrayList<DataNode> dns = cluster.getDataNodes();

  // The NN reports two volumes failures
  DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2, 
      origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
  checkAggregateFailuresAtNameNode(true, 2);
  checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
  checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());

  // After restarting the NN it still sees the two failures
  cluster.restartNameNode(0);
  cluster.waitActive();
  DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
      origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
  checkAggregateFailuresAtNameNode(true, 2);
  checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
  checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
}
 
Developer ID: naver, Project: hadoop, Lines: 42, Source: TestDataNodeVolumeFailureReporting.java

Example 3: testConfigureMinValidVolumes

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, i.e. that the DN shuts itself down when the number of volume
 * failures it experiences exceeds the tolerated amount.
 */
@Test
public void testConfigureMinValidVolumes() throws Exception {
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));

  // Bring up two additional datanodes that need both of their volumes
  // functioning in order to stay up.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
  cluster.startDataNodes(conf, 2, true, null, null);
  cluster.waitActive();
  final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
      ).getDatanodeManager();
  long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

  // Fail a volume on the 2nd DN
  File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
  DataNodeTestUtils.injectDataDirFailure(dn2Vol1);

  // Should only get two replicas (the first DN and the 3rd)
  Path file1 = new Path("/test1");
  DFSTestUtil.createFile(fs, file1, 1024, (short)3, 1L);
  DFSTestUtil.waitReplication(fs, file1, (short)2);

  // Check that this single failure caused a DN to die.
  DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 0, 
      origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);

  // If we restore the volume we should still only be able to get
  // two replicas since the DN is still considered dead.
  DataNodeTestUtils.restoreDataDirFromFailure(dn2Vol1);
  Path file2 = new Path("/test2");
  DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
  DFSTestUtil.waitReplication(fs, file2, (short)2);
}
 
Developer ID: naver, Project: hadoop, Lines: 40, Source: TestDataNodeVolumeFailureToleration.java


Note: The org.apache.hadoop.hdfs.DFSTestUtil.getDatanodeCapacity examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs; the snippets come from community-contributed open-source projects, and the source code copyright remains with the original authors. Please refer to each project's License before redistributing or reusing the code.