

Java DFSTestUtil.createDatanodeStorageInfos Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.createDatanodeStorageInfos. If you are unsure what DFSTestUtil.createDatanodeStorageInfos does or how to call it, the curated examples below should help. You can also browse other usage examples of org.apache.hadoop.hdfs.DFSTestUtil to see the class in a broader context.


Three code examples of the DFSTestUtil.createDatanodeStorageInfos method are shown below, ordered by popularity by default.
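All three examples follow the same basic pattern: pass an array of rack path strings to DFSTestUtil.createDatanodeStorageInfos (one simulated datanode storage per array entry) and convert the result to DatanodeDescriptors with DFSTestUtil.toDatanodeDescriptor. The minimal sketch below distills that pattern; it is not taken from any of the collected projects, it assumes the hadoop-hdfs test artifact (which provides DFSTestUtil) is on the classpath, and the class name CreateStoragesSketch is a placeholder.

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;

public class CreateStoragesSketch {
  public static void main(String[] args) {
    // One DatanodeStorageInfo per rack entry: four simulated datanodes on two racks.
    final String[] racks = {"/rackA", "/rackA", "/rackB", "/rackB"};
    DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(racks);

    // Each storage belongs to its own DatanodeDescriptor; tests typically group
    // these by rack or register them with a DatanodeManager.
    DatanodeDescriptor[] dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
    System.out.println("Created " + dataNodes.length + " datanode descriptors");
  }
}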

Example 1: setupMockCluster

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class that provides the method
@Before
public void setupMockCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
      "need to set a dummy value here so it assumes a multi-rack cluster");
  fsn = Mockito.mock(FSNamesystem.class);
  Mockito.doReturn(true).when(fsn).hasWriteLock();
  bm = new BlockManager(fsn, conf);
  final String[] racks = {
      "/rackA",
      "/rackA",
      "/rackA",
      "/rackB",
      "/rackB",
      "/rackB"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  nodes = Arrays.asList(DFSTestUtil.toDatanodeDescriptor(storages));
  rackA = nodes.subList(0, 3);
  rackB = nodes.subList(3, 6);
}
 
Developer: naver | Project: hadoop | Lines of code: 20 | Source file: TestBlockManager.java

Example 2: setupCluster

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class that provides the method
@BeforeClass
public static void setupCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/rack1",
      "/rack1",
      "/rack1",
      "/rack2",
      "/rack2",
      "/rack2"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);
  int blockSize = 1024;

  dnrList = new ArrayList<DatanodeRegistration>();
  dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();

  // Register DNs
  for (int i=0; i < 6; i++) {
    DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
        new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
        VersionInfo.getVersion());
    dnrList.add(dnr);
    dnManager.registerDatanode(dnr);
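    // give each simulated storage ample capacity and free space so it stays eligible for block placement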
    dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L);
    dataNodes[i].updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
        0L, 0L, 0, 0, null);
  }
}
 
Developer: naver | Project: hadoop | Lines of code: 46 | Source file: TestReplicationPolicyConsiderLoad.java

Example 3: setupCluster

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class that provides the method
@BeforeClass
public static void setupCluster() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/d1/r1",
      "/d1/r1",
      "/d1/r2",
      "/d1/r2",
      "/d2/r3",
      "/d2/r3"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);

  // create an extra storage for dn5.
  DatanodeStorage extraStorage = new DatanodeStorage(
      storages[5].getStorageID() + "-extra", DatanodeStorage.State.NORMAL,
      StorageType.DEFAULT);
  // DatanodeStorageInfo si = new DatanodeStorageInfo(
  //     storages[5].getDatanodeDescriptor(), extraStorage);
  BlockManagerTestUtil.updateStorage(storages[5].getDatanodeDescriptor(),
      extraStorage);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());

  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (int i=0; i < NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
    bm.getDatanodeManager().getHeartbeatManager().addDatanode(
        dataNodes[i]);
  }
  resetHeartbeatForStorages();
}
 
Developer: naver | Project: hadoop | Lines of code: 48 | Source file: TestReplicationPolicy.java
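Unlike the flat rack names in the first two examples, the rack strings here ("/d1/r1" through "/d2/r3") describe a two-level topology (data center / rack), and an extra DatanodeStorage is attached to the sixth descriptor so that one node carries two storages. The hypothetical helper below (not part of TestReplicationPolicy) illustrates what that layout implies for the NetworkTopology built in setupCluster():

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.NetworkTopology;

class TopologySketch {
  // Hypothetical check: the six rack strings above span three racks; the first
  // two descriptors ("/d1/r1") share a rack, while the fifth ("/d2/r3") does not
  // share a rack with them.
  static void checkLayout(NetworkTopology cluster, DatanodeDescriptor[] dataNodes) {
    assert cluster.getNumOfRacks() == 3;
    assert cluster.isOnSameRack(dataNodes[0], dataNodes[1]);
    assert !cluster.isOnSameRack(dataNodes[0], dataNodes[4]);
  }
}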


Note: The org.apache.hadoop.hdfs.DFSTestUtil.createDatanodeStorageInfos examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code. Do not reproduce this article without permission.