当前位置: 首页>>代码示例>>Java>>正文


Java NetworkTopology.getFirstHalf方法代码示例

本文整理汇总了Java中org.apache.hadoop.net.NetworkTopology.getFirstHalf方法的典型用法代码示例。如果您正苦于以下问题:Java NetworkTopology.getFirstHalf方法的具体用法?Java NetworkTopology.getFirstHalf怎么用?Java NetworkTopology.getFirstHalf使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.net.NetworkTopology的用法示例。


在下文中一共展示了NetworkTopology.getFirstHalf方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: chooseRemoteRack

import org.apache.hadoop.net.NetworkTopology; //导入方法依赖的package包/类
/**
 * {@inheritDoc}
 */
@Override
protected void chooseRemoteRack(int numOfReplicas,
    DatanodeDescriptor localMachine, Set<Node> excludedNodes,
    long blocksize, int maxReplicasPerRack, List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes)
    throws NotEnoughReplicasException {
  // Remember how many replicas were already placed so the fallback can
  // request only the remainder.
  final int alreadyChosen = results.size();

  // With node groups, the rack is the first half of the network location.
  final String localRack = NetworkTopology.getFirstHalf(
      localMachine.getNetworkLocation());
  try {
    // First attempt: pick randomly from nodes outside the local rack.
    chooseRandom(numOfReplicas, "~" + localRack, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e) {
    // Not enough remote candidates: place the still-missing replicas on
    // the local rack instead.
    final int stillNeeded = numOfReplicas - (results.size() - alreadyChosen);
    chooseRandom(stillNeeded, localRack, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:25,代码来源:BlockPlacementPolicyWithNodeGroup.java

示例2: chooseRemoteRack

import org.apache.hadoop.net.NetworkTopology; //导入方法依赖的package包/类
@Override
protected void chooseRemoteRack(int numOfReplicas,
    DatanodeDescriptor localMachine, Set<Node> excludedNodes,
    long blocksize, int maxReplicasPerRack, List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes)
    throws NotEnoughReplicasException {
  // Snapshot the result count so we can tell how many replicas the first
  // attempt actually managed to place.
  final int countBefore = results.size();
  // The rack scope is the first half of the node-group-aware location.
  final String rackScope = NetworkTopology.getFirstHalf(
      localMachine.getNetworkLocation());
  try {
    // Prefer nodes on racks other than the writer's rack.
    chooseRandom(numOfReplicas, "~" + rackScope, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e) {
    // Remote racks could not satisfy the request; fall back to the local
    // rack for whatever is still missing.
    chooseRandom(numOfReplicas - (results.size() - countBefore),
        rackScope, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:22,代码来源:BlockPlacementPolicyWithNodeGroup.java

示例3: chooseLocalRack

import org.apache.hadoop.net.NetworkTopology; //导入方法依赖的package包/类
@Override
protected DatanodeStorageInfo chooseLocalRack(Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws
    NotEnoughReplicasException {
  // Without a local machine there is no rack preference: pick anywhere.
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }

  try {
    // Try the writer's own rack (first half of the node-group-aware
    // location), which keeps the replica rack-local but off-nodegroup.
    return chooseRandom(
        NetworkTopology.getFirstHalf(localMachine.getNetworkLocation()),
        excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e1) {
    // The writer's rack is exhausted; retry on the rack of the second
    // replica already chosen, if one exists.
    final DatanodeDescriptor secondReplica = secondNode(localMachine, results);
    if (secondReplica != null) {
      try {
        return chooseRandom(
            clusterMap.getRack(secondReplica.getNetworkLocation()),
            excludedNodes, blocksize, maxNodesPerRack, results,
            avoidStaleNodes, storageTypes);
      } catch (NotEnoughReplicasException e2) {
        // that rack is exhausted too; fall through to the global fallback
      }
    }
    // Last resort: choose a random node anywhere in the cluster.
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:39,代码来源:BlockPlacementPolicyWithNodeGroup.java

示例4: getRack

import org.apache.hadoop.net.NetworkTopology; //导入方法依赖的package包/类
@Override
protected String getRack(final DatanodeInfo cur) {
  // A node-group-aware location has the form /rack/nodegroup; the rack
  // identifier is its first half.
  return NetworkTopology.getFirstHalf(cur.getNetworkLocation());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:6,代码来源:BlockPlacementPolicyWithNodeGroup.java

示例5: testBalancerWithRackLocality

import org.apache.hadoop.net.NetworkTopology; //导入方法依赖的package包/类
/**
 * Create a cluster with even distribution, and a new empty node is added to
 * the cluster, then test rack locality for balancer policy.
 *
 * Verifies that after balancing, the used capacity is spread across exactly
 * two racks and that both racks hold equal amounts of data.
 */
@Test(timeout=60000)
public void testBalancerWithRackLocality() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[]{CAPACITY, CAPACITY};
  String[] racks = new String[]{RACK0, RACK1};
  String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP1};

  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
                              .numDataNodes(capacities.length)
                              .racks(racks)
                              .simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, 
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);

    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
        (short) numOfDatanodes, 0);

    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
        new long[] {newCapacity}, new String[]{newNodeGroup});

    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);

    DatanodeInfo[] datanodeReport = 
            client.getDatanodeReport(DatanodeReportType.ALL);

    // Sum per-rack usage in long arithmetic: narrowing getDfsUsed() to
    // int can overflow once a datanode stores more than ~2 GB.
    Map<String, Long> rackToUsedCapacity = new HashMap<String, Long>();
    for (DatanodeInfo datanode: datanodeReport) {
      String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation());
      long usedCapacity = datanode.getDfsUsed();

      Long current = rackToUsedCapacity.get(rack);
      rackToUsedCapacity.put(rack,
          current == null ? usedCapacity : current + usedCapacity);
    }
    // JUnit convention: expected value first, so failures report correctly.
    assertEquals(2, rackToUsedCapacity.size());
    assertEquals(rackToUsedCapacity.get(RACK0), rackToUsedCapacity.get(RACK1));

  } finally {
    cluster.shutdown();
  }
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:66,代码来源:TestBalancerWithNodeGroup.java


注:本文中的org.apache.hadoop.net.NetworkTopology.getFirstHalf方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。