

Java NetworkTopology.add Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.net.NetworkTopology.add. If you are wondering how NetworkTopology.add is used in practice, the curated examples below may help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.net.NetworkTopology.


The following presents 14 code examples of the NetworkTopology.add method, ordered by popularity by default.
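Before the collected examples, here is a minimal, self-contained sketch of the method's basic contract (written for this article, not taken from any of the projects below; the class name NetworkTopologyAddSketch is hypothetical): NetworkTopology.add inserts a leaf node into the cluster tree, creating intermediate rack nodes from the node's network location, after which locality queries such as isOnSameRack and getDistance can be answered.

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class NetworkTopologyAddSketch {
  public static void main(String[] args) {
    NetworkTopology topology = new NetworkTopology();

    // add() places each leaf under the rack encoded in its path;
    // the inner rack nodes "/rack1" and "/rack2" are created on demand.
    Node n1 = new NodeBase("/rack1/node1");
    Node n2 = new NodeBase("/rack1/node2");
    Node n3 = new NodeBase("/rack2/node3");
    topology.add(n1);
    topology.add(n2);
    topology.add(n3);

    System.out.println("racks:  " + topology.getNumOfRacks());          // 2
    System.out.println("leaves: " + topology.getNumOfLeaves());         // 3
    System.out.println("same rack: " + topology.isOnSameRack(n1, n2));  // true
    System.out.println("distance:  " + topology.getDistance(n1, n3));   // 4
  }
}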

Example 1: testLocality

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
@Test
public void testLocality() throws Exception {
  NetworkTopology nt = new NetworkTopology();

  Node r1n1 = new NodeBase("/default/rack1/node1");
  nt.add(r1n1);
  Node r1n2 = new NodeBase("/default/rack1/node2");
  nt.add(r1n2);

  Node r2n3 = new NodeBase("/default/rack2/node3");
  nt.add(r2n3);

  LOG.debug("r1n1 parent: " + r1n1.getParent() + "\n" +
            "r1n2 parent: " + r1n2.getParent() + "\n" +
            "r2n3 parent: " + r2n3.getParent());

  // Same host
  assertEquals(0, JobInProgress.getMatchingLevelForNodes(r1n1, r1n1, 3));
  // Same rack
  assertEquals(1, JobInProgress.getMatchingLevelForNodes(r1n1, r1n2, 3));
  // Different rack
  assertEquals(2, JobInProgress.getMatchingLevelForNodes(r1n1, r2n3, 3));
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 24, Source: TestJobInProgress.java
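As a rough complement to Example 1 (a hypothetical sketch, not part of the original test): the matching level reflects how far up the tree two nodes first share an ancestor, which the topology itself can report through getDistance and isOnSameRack.

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

NetworkTopology nt = new NetworkTopology();
Node r1n1 = new NodeBase("/default/rack1/node1");
Node r1n2 = new NodeBase("/default/rack1/node2");
Node r2n3 = new NodeBase("/default/rack2/node3");
nt.add(r1n1);
nt.add(r1n2);
nt.add(r2n3);

System.out.println(nt.getDistance(r1n1, r1n1));  // 0: same node (matching level 0)
System.out.println(nt.getDistance(r1n1, r1n2));  // 2: same rack (matching level 1)
System.out.println(nt.getDistance(r1n1, r2n3));  // 4: different rack (matching level 2)
System.out.println(nt.isOnSameRack(r1n1, r1n2)); // true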

Example 2: initTest

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
private VerifiablePolicy initTest() throws Exception {
  VerifiablePolicy policy = new VerifiablePolicy();
  Configuration conf = new Configuration();
  TestClusterStats stats = new TestClusterStats();
  NetworkTopology clusterMap = new NetworkTopology();
  TestHostsReader hostsReader = new TestHostsReader();
  TestMapping dnsToSwitchMapping = new TestMapping();

  for (DatanodeDescriptor d: dataNodes) {
    clusterMap.add(d);
  }

  conf.setInt("dfs.replication.rackwindow", 2);
  conf.setInt("dfs.replication.machineWindow", 2);

  policy.initialize(conf, stats, clusterMap, hostsReader, dnsToSwitchMapping, null);
  return policy;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 19, Source: TestConfigurableBlockPlacement.java

Example 3: addNodes

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (DatanodeDescriptor dn : nodesToAdd) {
    cluster.add(dn);
    dn.getStorageInfos()[0].setUtilizationForTesting(
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);
    dn.updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0L, 0L, 0, 0,
        null);
    bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
  }
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestBlockManager.java

Example 4: addNodes

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (DatanodeDescriptor dn : nodesToAdd) {
    cluster.add(dn);
    dn.getStorageInfos()[0].setUtilizationForTesting(
        2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        2 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);
    dn.updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0L, 0L, 0, 0,
        null);
    bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 15, Source: TestBlockManager.java

Example 5: addNodes

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (DatanodeDescriptor dn : nodesToAdd) {
    cluster.add(dn);
    dn.updateHeartbeat(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
    bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 12, Source: TestBlockManager.java

Example 6: addNodes

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (DatanodeDescriptor dn : nodesToAdd) {
    cluster.add(dn);
    dn.getStorageInfos()[0].setUtilizationForTesting(
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L);
    dn.updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0L, 0L, 0, 0);
    bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
  }
}
 
Developer: yncxcw, Project: FlexMap, Lines: 14, Source: TestBlockManager.java

Example 7: addNodes

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd)
    throws IOException {
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (DatanodeDescriptor dn : nodesToAdd) {
    cluster.add(dn);
    dn.updateHeartbeat(2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,
        0L, 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0, 0);
    bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn);
    bm.getDatanodeManager().addDnToStorageMapInDB(dn);
    bm.getDatanodeManager().addDatanode(dn);
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 14, Source: TestBlockManager.java

Example 8: testChooseTargetWithTopology

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
@Test
public void testChooseTargetWithTopology() throws Exception {
  BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
  BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
      new StorageType[]{StorageType.DISK, StorageType.SSD,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] types = {StorageType.DISK, StorageType.SSD,
      StorageType.ARCHIVE};

  final DatanodeStorageInfo[] storages = DFSTestUtil
      .createDatanodeStorageInfos(3, racks, hosts, types);
  final DatanodeDescriptor[] dataNodes = DFSTestUtil
      .toDatanodeDescriptor(storages);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy1);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
  targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy2);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
}
 
Developer: naver, Project: hadoop, Lines: 46, Source: TestBlockStoragePolicy.java

Example 9: testChooseSsdOverDisk

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};

  final DatanodeStorageInfo[] diskStorages
      = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes
      = DFSTestUtil.toDatanodeDescriptor(diskStorages);
  for(int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
            StorageType.SSD));
  }

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
 
Developer: naver, Project: hadoop, Lines: 44, Source: TestBlockStoragePolicy.java

Example 10: testFsckMisPlacedReplicas

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  String [] racks = {"/rack1", "/rack1"};
  String [] hosts = {"host1", "host2"};
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas 
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;
    
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_DN, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // check misReplicatedBlock number.
    assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 74, Source: TestFsck.java

Example 11: testFsckMisPlacedReplicas

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  String [] racks = {"/rack1", "/rack1"};
  String [] hosts = {"host1", "host2"};
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas 
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;
    
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_DN, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    fsck.check(pathString, file, replRes, ecRes);
    // check misReplicatedBlock number.
    assertEquals(replRes.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 75, Source: TestFsck.java

Example 12: testFsckMisPlacedReplicas

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  String [] racks = {"/rack1", "/rack1"};
  String [] hosts = {"host1", "host2"};
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas 
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;
    
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_DN, REPL_FACTOR, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // check misReplicatedBlock number.
    assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 74, Source: TestFsck.java

Example 13: testChooseTarget

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
public void testChooseTarget() throws Exception {
  VerifiablePolicy policy = new VerifiablePolicy();
  Configuration conf = new Configuration();
  TestClusterStats stats = new TestClusterStats();
  NetworkTopology clusterMap = new NetworkTopology();
  TestHostsReader hostsReader = new TestHostsReader();
  TestMapping dnsToSwitchMapping = new TestMapping();

  for (DatanodeDescriptor d: dataNodes) {
    clusterMap.add(d);
  }

  conf.setInt("dfs.replication.rackwindow", 2);
  conf.setInt("dfs.replication.machineWindow", 2);

  policy.initialize(conf, stats, clusterMap, hostsReader, dnsToSwitchMapping, null);

  HashMap <Node, Node> emptyMap = new HashMap<Node, Node>();
  List <DatanodeDescriptor> results = new ArrayList <DatanodeDescriptor>();
  DatanodeDescriptor writer = dataNodes[0];

  // Replication Factor 2
  DatanodeDescriptor fwriter = policy.chooseTarget(2, writer, emptyMap,
      512, 4, results, true);

  assertEquals(writer.getNetworkLocation(), fwriter.getNetworkLocation());
  assertEquals(writer.getNetworkLocation(),
      results.get(0).getNetworkLocation());
  assertEquals(results.get(0).getNetworkLocation(),
      results.get(1).getNetworkLocation());
  assertFalse(results.get(0).getHost().equals(
      results.get(1).getHost()));

  results.clear();
  emptyMap.clear();
  writer = dataNodes[0];

  // Replication Factor 3
  fwriter = policy.chooseTarget(3, writer, emptyMap,
      512, 4, results, true);

  assertEquals(writer.getNetworkLocation(), fwriter.getNetworkLocation());
  assertEquals(writer.getNetworkLocation(),
      results.get(0).getNetworkLocation());
  assertEquals(results.get(1).getNetworkLocation(),
      results.get(2).getNetworkLocation());
  assertFalse(results.get(0).getNetworkLocation().equals(
      results.get(1).getNetworkLocation()));
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 50, Source: TestConfigurableBlockPlacement.java

Example 14: testFindBest

import org.apache.hadoop.net.NetworkTopology; // import the package/class this method depends on
public void testFindBest() throws Exception {
  VerifiablePolicy policy = new VerifiablePolicy();
  Configuration conf = new Configuration();
  TestClusterStats stats = new TestClusterStats();
  NetworkTopology clusterMap = new NetworkTopology();
  TestHostsReader hostsReader = new TestHostsReader();
  TestMapping dnsToSwitchMapping = new TestMapping();


  for (DatanodeDescriptor d: dataNodes) {
    clusterMap.add(d);
  }

  conf.setInt("dfs.replication.rackwindow", 2);
  conf.setInt("dfs.replication.machineWindow", 2);

  policy.initialize(conf, stats, clusterMap, hostsReader, dnsToSwitchMapping, null);

  DatanodeDescriptor[] r;

  r = policy.findBest(Arrays.asList(
      dataNodes[2],
      dataNodes[9],
      dataNodes[10],
      dataNodes[11],
      dataNodes[12],
      dataNodes[8],
      dataNodes[7]));
  assertEquals(dataNodes[2],r[0]);
  assertEquals(dataNodes[8],r[1]);
  assertEquals(dataNodes[7],r[2]);

  conf.setInt("dfs.replication.rackwindow", 1);
  conf.setInt("dfs.replication.machineWindow", 2);

  policy.initialize(conf, stats, clusterMap, hostsReader, dnsToSwitchMapping, null);

  r = policy.findBest(Arrays.asList(
      dataNodes[2],
      dataNodes[9],
      dataNodes[11]));
  assertEquals(dataNodes[2],r[0]);
  assertNull(r[1]);
  assertNull(r[2]);

  r = policy.findBest(Arrays.asList(
      dataNodes[2],
      dataNodes[6],
      dataNodes[9],
      dataNodes[12]));
  assertNull(r[0]);
  assertEquals(dataNodes[9],r[1]);
  assertEquals(dataNodes[12],r[2]);

  r = policy.findBest(Arrays.asList(
      dataNodes[2],
      dataNodes[4],
      dataNodes[9],
      dataNodes[12]));
  assertEquals(dataNodes[2],r[0]);
  assertEquals(dataNodes[4],r[1]);
  assertNull(r[2]);

}
 
Developer: rhli, Project: hadoop-EAR, Lines: 65, Source: TestConfigurableBlockPlacement.java


Note: The org.apache.hadoop.net.NetworkTopology.add method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the community, and copyright in the source code remains with the original authors. Consult each project's license before distributing or reusing the code; do not republish without permission.