

Java DFSTestUtil.getDatanodeDescriptor method code examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.getDatanodeDescriptor. If you are wondering what DFSTestUtil.getDatanodeDescriptor does, how to call it, or what real-world usages look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


The sections below show 9 code examples of DFSTestUtil.getDatanodeDescriptor, ordered by popularity by default.
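
Before the individual examples, here is a minimal, self-contained sketch of the most common call patterns. This is a sketch only: it assumes JUnit-style Hadoop test code with the HDFS test classes on the classpath, and the class name, IP addresses, and rack paths below are illustrative.

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.NetworkTopology;

public class GetDatanodeDescriptorSketch {
  public static void main(String[] args) {
    // Two-argument overload: IP address and rack location (used by most examples below).
    DatanodeDescriptor dn1 = DFSTestUtil.getDatanodeDescriptor("1.2.3.4", "/d1/r1");
    // Three-argument overload (see Example 6): an explicit port allows two
    // descriptors to share the same IP address.
    DatanodeDescriptor dn2 = DFSTestUtil.getDatanodeDescriptor("1.2.3.4", 5021, "/d1/r1");

    // Descriptors are typically registered in a NetworkTopology for placement tests.
    NetworkTopology cluster = new NetworkTopology();
    cluster.add(dn1);
    cluster.add(dn2);
    System.out.println(cluster.getNumOfLeaves());  // expected: 2
  }
}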

Example 1: testCreateInvalidTopology

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Test
public void testCreateInvalidTopology() throws Exception {
  NetworkTopology invalCluster = new NetworkTopology();
  DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")
  };
  invalCluster.add(invalDataNodes[0]);
  invalCluster.add(invalDataNodes[1]);
  try {
    invalCluster.add(invalDataNodes[2]);
    fail("expected InvalidTopologyException");
  } catch (NetworkTopology.InvalidTopologyException e) {
    assertTrue(e.getMessage().startsWith("Failed to add "));
    assertTrue(e.getMessage().contains(
        "You cannot have a rack and a non-rack node at the same " +
        "level of the network topology."));
  }
}
 
Developer: naver | Project: hadoop | Lines: 21 | Source: TestNetworkTopology.java

Example 2: testChooseTarget5

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
/**
 * In this test case, the client is a node outside of the file system,
 * so the 1st replica can be placed on any node.
 * The 2nd replica should be placed on a different rack, and
 * the 3rd replica should be placed on the same rack as the 2nd replica.
 * @throws Exception
 */
@Test
public void testChooseTarget5() throws Exception {
  DatanodeDescriptor writerDesc =
    DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");

  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, writerDesc);
  assertEquals(targets.length, 0);

  targets = chooseTarget(1, writerDesc);
  assertEquals(targets.length, 1);

  targets = chooseTarget(2, writerDesc);
  assertEquals(targets.length, 2);
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3, writerDesc);
  assertEquals(targets.length, 3);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
}
 
Developer: naver | Project: hadoop | Lines: 29 | Source: TestReplicationPolicy.java

Example 3: setupDatanodes

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Before
public void setupDatanodes() {
  dataNodes = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
      DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3"),
      DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3"),
      DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d3/r1"),
      DFSTestUtil.getDatanodeDescriptor("10.10.10.10", "/d3/r1"),
      DFSTestUtil.getDatanodeDescriptor("11.11.11.11", "/d3/r1"),
      DFSTestUtil.getDatanodeDescriptor("12.12.12.12", "/d3/r2"),
      DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2"),
      DFSTestUtil.getDatanodeDescriptor("14.14.14.14", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("15.15.15.15", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("16.16.16.16", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("17.17.17.17", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("18.18.18.18", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("19.19.19.19", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("20.20.20.20", "/d4/r1"),        
  };
  for (int i = 0; i < dataNodes.length; i++) {
    cluster.add(dataNodes[i]);
  }
  // Mark two of the /d3/r1 datanodes as decommissioned; some topology
  // queries in this test class exclude decommissioned nodes.
  dataNodes[9].setDecommissioned();
  dataNodes[10].setDecommissioned();
}
 
Developer: naver | Project: hadoop | Lines: 31 | Source: TestNetworkTopology.java

Example 4: testContains

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Test
public void testContains() throws Exception {
  DatanodeDescriptor nodeNotInMap = 
    DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4");
  for (int i=0; i < dataNodes.length; i++) {
    assertTrue(cluster.contains(dataNodes[i]));
  }
  assertFalse(cluster.contains(nodeNotInMap));
}
 
Developer: naver | Project: hadoop | Lines: 10 | Source: TestNetworkTopology.java

Example 5: getDatanodeDescriptor

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
    String rackLocation, DatanodeStorage storage, String hostname) {
    DatanodeDescriptor dn = DFSTestUtil.getDatanodeDescriptor(ipAddr,
        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation, hostname);
    if (storage != null) {
      dn.updateStorage(storage);
    }
    return dn;
}
 
Developer: naver | Project: hadoop | Lines: 10 | Source: BlockManagerTestUtil.java
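
A hedged usage sketch for this helper (assuming test code that can see BlockManagerTestUtil; the IP address, rack, and hostname values are illustrative only):

DatanodeStorage storage = new DatanodeStorage(DatanodeStorage.generateUuid());
DatanodeDescriptor dn = BlockManagerTestUtil.getDatanodeDescriptor(
    "10.0.0.1", "/rackA", storage, "host1.example.com");
// dn now carries the default DataNode port and reports one storage,
// so it can be handed to block-placement tests like the ones above.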

Example 6: setup

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Before
public void setup() {
  dataNodes = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
      // same IP as the previous entry but a different port
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", 5021, "/d1/r2"),
  };
  for (DatanodeDescriptor node : dataNodes) {
    map.add(node);
  }
  map.add(null); // the map is expected to tolerate a null entry
}
 
Developer: naver | Project: hadoop | Lines: 14 | Source: TestHost2NodesMap.java

Example 7: testContains

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Test
public void testContains() throws Exception {
  DatanodeDescriptor nodeNotInMap =
    DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  for (int i = 0; i < dataNodes.length; i++) {
    assertTrue(map.contains(dataNodes[i]));
  }
  assertFalse(map.contains(null));
  assertFalse(map.contains(nodeNotInMap));
}
 
Developer: naver | Project: hadoop | Lines: 11 | Source: TestHost2NodesMap.java

Example 8: testRemove

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
@Test
public void testRemove() throws Exception {
  DatanodeDescriptor nodeNotInMap =
    DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  assertFalse(map.remove(nodeNotInMap));
  
  assertTrue(map.remove(dataNodes[0]));
  assertTrue(map.getDatanodeByHost("1.1.1.1.")==null);
  assertTrue(map.getDatanodeByHost("2.2.2.2")==dataNodes[1]);
  DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
  assertTrue(node==dataNodes[2] || node==dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));
  
  assertTrue(map.remove(dataNodes[2]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);
  
  assertTrue(map.remove(dataNodes[3]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertNull(map.getDatanodeByHost("3.3.3.3"));
  
  assertFalse(map.remove(null));
  assertTrue(map.remove(dataNodes[1]));
  assertFalse(map.remove(dataNodes[1]));
}
 
Developer: naver | Project: hadoop | Lines: 28 | Source: TestHost2NodesMap.java

Example 9: doTestOneOfTwoRacksDecommissioned

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class the method depends on
private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
  // Block originally on A1, A2, B1
  List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
  List<DatanodeDescriptor> origNodes = getNodes(origStorages);
  BlockInfoContiguous blockInfo = addBlockOnNodes(testIndex, origNodes);
  
  // Decommission all of the nodes in rack A
  List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 2);
  
  DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
  assertTrue("Source of replication should be one of the nodes the block " +
      "was on. Was: " + pipeline[0],
      origStorages.contains(pipeline[0]));
  // Only up to two nodes can be picked per rack when there are two racks.
  assertEquals("Should have two targets", 2, pipeline.length);
  
  boolean foundOneOnRackB = false;
  for (int i = 1; i < pipeline.length; i++) {
    DatanodeDescriptor target = pipeline[i].getDatanodeDescriptor();
    if (rackB.contains(target)) {
      foundOneOnRackB = true;
    }
    assertFalse(decomNodes.contains(target));
    assertFalse(origNodes.contains(target));
  }
  
  assertTrue("Should have at least one target on rack B. Pipeline: " +
      Joiner.on(",").join(pipeline),
      foundOneOnRackB);
  
  // Mark the block as received on the target nodes in the pipeline
  fulfillPipeline(blockInfo, pipeline);

  // the block is still under-replicated. Add a new node. This should allow
  // the third off-rack replica.
  DatanodeDescriptor rackCNode =
    DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
  rackCNode.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
  addNodes(ImmutableList.of(rackCNode));
  try {
    DatanodeStorageInfo[] pipeline2 = scheduleSingleReplication(blockInfo);
    assertEquals(2, pipeline2.length);
    assertEquals(rackCNode, pipeline2[1].getDatanodeDescriptor());
  } finally {
    removeNode(rackCNode);
  }
}
 
Developer: naver | Project: hadoop | Lines: 48 | Source: TestBlockManager.java


Note: the org.apache.hadoop.hdfs.DFSTestUtil.getDatanodeDescriptor examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not republish without permission.