当前位置: 首页>>代码示例>>Java>>正文


Java NodeBase类代码示例

本文整理汇总了Java中org.apache.hadoop.net.NodeBase的典型用法代码示例。如果您正苦于以下问题:Java NodeBase类的具体用法?Java NodeBase怎么用?Java NodeBase使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


NodeBase类属于org.apache.hadoop.net包,在下文中一共展示了NodeBase类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: OneBlockInfo

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Captures the location metadata for one file block: the owning file,
 * its offset/length, the replica hosts, and the rack of each host.
 */
OneBlockInfo(Path path, long offset, long len,
  String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  assert (topologyPaths.length == 0 ||
    hosts.length == topologyPaths.length);

  // No rack information from the file system: synthesize one topology
  // path per host, all placed on the default rack.
  if (topologyPaths.length == 0) {
    String[] synthesized = new String[hosts.length];
    for (int idx = 0; idx < synthesized.length; idx++) {
      synthesized[idx] =
        new NodeBase(hosts[idx], NetworkTopology.DEFAULT_RACK).toString();
    }
    topologyPaths = synthesized;
  }

  // Each topology path ends with the host name; keep only the rack part.
  this.racks = new String[topologyPaths.length];
  for (int idx = 0; idx < this.racks.length; idx++) {
    this.racks[idx] = new NodeBase(topologyPaths[idx]).getNetworkLocation();
  }
}
 
开发者ID:Tencent,项目名称:angel,代码行数:27,代码来源:BalanceInputFormat.java

示例2: OneBlockInfo

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
OneBlockInfo(Path path, long offset, long len,
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // if the file system does not have any rack information, then
  // use dummy rack location.
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i],
                          NetworkTopology.DEFAULT_RACK)).toString();
    }
  }

  // The topology paths have the host name included as the last
  // component. Strip it.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
 
开发者ID:aliyun,项目名称:aliyun-maxcompute-data-collectors,代码行数:27,代码来源:CombineFileInputFormat.java

示例3: OneBlockInfo

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Records the location metadata for a single file block.
 *
 * @param path          file the block belongs to
 * @param offset        byte offset of the block within the file
 * @param len           length of the block in bytes
 * @param hosts         hosts holding a replica of this block
 * @param topologyPaths network-topology path of each host (parallel to
 *                      {@code hosts}), or empty when the file system
 *                      reports no rack information
 */
OneBlockInfo(Path path, long offset, long len, 
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  // Either one topology path per host, or none at all.
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // if the file system does not have any rack information, then
  // use dummy rack location (every host on the default rack).
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i], 
                          NetworkTopology.DEFAULT_RACK)).toString();
    }
  }

  // The topology paths have the host name included as the last 
  // component. Strip it, keeping only the rack location.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:27,代码来源:CombineFileInputFormat.java

示例4: chooseFromNextRack

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Chooses one storage target on the rack of {@code next}; if that rack
 * cannot supply a target, falls back to a random node anywhere in the
 * cluster ({@link NodeBase#ROOT} scope).
 *
 * @throws NotEnoughReplicasException if the cluster-wide fallback also fails
 */
private DatanodeStorageInfo chooseFromNextRack(Node next,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
  final String nextRack = next.getNetworkLocation();
  try {
    return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch(NotEnoughReplicasException e) {
    if (LOG.isDebugEnabled()) {
      // Fixed typo in the emitted log message: "ramdomly" -> "randomly".
      LOG.debug("Failed to choose from the next rack (location = " + nextRack
          + "), retry choosing randomly", e);
    }
    //otherwise randomly choose one from the network
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:BlockPlacementPolicyDefault.java

示例5: coreResolve

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Resolves a single host name to a topology node via the configured
 * DNS-to-switch mapping, defaulting to the default rack when the mapping
 * yields no answer.
 */
private static Node coreResolve(String hostName) {
  List<String> query = new ArrayList<String>(1);
  query.add(hostName);
  List<String> resolved = dnsToSwitchMapping.resolve(query);

  String rack;
  if (resolved != null && resolved.get(0) != null) {
    rack = resolved.get(0);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Resolved " + hostName + " to " + rack);
    }
  } else {
    // The mapping gave no answer: fall back to the default rack.
    rack = NetworkTopology.DEFAULT_RACK;
    if (LOG.isDebugEnabled()) {
      LOG.debug("Couldn't resolve " + hostName + ". Falling back to "
          + NetworkTopology.DEFAULT_RACK);
    }
  }
  return new NodeBase(hostName, rack);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:20,代码来源:RackResolver.java

示例6: chooseOnce

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>.
 * Except that 1st replica prefer local storage.
 * @return local node of writer.
 */
private Node chooseOnce(int numOfReplicas,
                          Node writer,
                          final Set<Node> excludedNodes,
                          final long blocksize,
                          final int maxNodesPerRack,
                          final List<DatanodeStorageInfo> results,
                          final boolean avoidStaleNodes,
                          EnumMap<StorageType, Integer> storageTypes)
                          throws NotEnoughReplicasException {
  if (numOfReplicas == 0) {
    return writer;
  }
  // First replica: prefer storage local to the writer.
  writer = chooseLocalStorage(writer, excludedNodes, blocksize,
      maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
      .getDatanodeDescriptor();
  numOfReplicas--;
  // Remaining replicas: pick at random across the whole topology.
  if (numOfReplicas > 0) {
    chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
  return writer;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:28,代码来源:BlockPlacementPolicyRackFaultTolerant.java

示例7: OneBlockInfo

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Records the location metadata for one block: owning file, byte range,
 * replica hosts, and the rack of each replica.
 *
 * @param path          file the block belongs to
 * @param offset        byte offset of the block within the file
 * @param len           length of the block in bytes
 * @param hosts         hosts holding a replica of this block
 * @param topologyPaths network-topology path per host, or empty when the
 *                      file system reports no rack information
 */
OneBlockInfo(Path path, long offset, long len, 
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  // Either one topology path per host, or none at all.
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // if the file system does not have any rack information, then
  // use dummy rack location (all hosts on the default rack).
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i], NetworkTopology.DEFAULT_RACK)).
                                      toString();
    }
  }

  // The topology paths have the host name included as the last 
  // component. Strip it, keeping only the rack location.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:27,代码来源:CombineFileInputFormat.java

示例8: testLocality

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
@Test
public void testLocality() throws Exception {
  NetworkTopology topology = new NetworkTopology();

  // Two nodes on rack1, one node on rack2.
  Node rack1Node1 = new NodeBase("/default/rack1/node1");
  Node rack1Node2 = new NodeBase("/default/rack1/node2");
  Node rack2Node3 = new NodeBase("/default/rack2/node3");
  topology.add(rack1Node1);
  topology.add(rack1Node2);
  topology.add(rack2Node3);

  LOG.debug("r1n1 parent: " + rack1Node1.getParent() + "\n" +
            "r1n2 parent: " + rack1Node2.getParent() + "\n" +
            "r2n3 parent: " + rack2Node3.getParent());

  // Same host
  assertEquals(0, JobInProgress.getMatchingLevelForNodes(rack1Node1, rack1Node1, 3));
  // Same rack
  assertEquals(1, JobInProgress.getMatchingLevelForNodes(rack1Node1, rack1Node2, 3));
  // Different rack
  assertEquals(2, JobInProgress.getMatchingLevelForNodes(rack1Node1, rack2Node3, 3));
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:24,代码来源:TestJobInProgress.java

示例9: OneBlockInfo

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Records the location metadata for a single file block.
 *
 * @param path          file the block belongs to
 * @param offset        byte offset of the block within the file
 * @param len           length of the block in bytes
 * @param hosts         hosts holding a replica of this block
 * @param topologyPaths network-topology path per host (parallel to
 *                      {@code hosts}), or empty when the file system
 *                      reports no rack information
 */
OneBlockInfo(Path path, long offset, long len,
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  // Either one topology path per host, or none at all.
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // if the file system does not have any rack information, then
  // use dummy rack location (all hosts on the default rack).
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i], NetworkTopology.DEFAULT_RACK)).
                                      toString();
    }
  }

  // The topology paths have the host name included as the last
  // component. Strip it, keeping only the rack location.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:27,代码来源:CombineFileInputFormat.java

示例10: chooseLocalNode

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Picks a target datanode, preferring the writer's own machine, then a node
 * on the writer's rack, then (when no local machine is known) any node in
 * the cluster.
 */
protected DatanodeDescriptor chooseLocalNode(
                                           DatanodeDescriptor localMachine,
                                           HashMap<Node, Node> excludedNodes,
                                           long blocksize,
                                           int maxNodesPerRack,
                                           List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  // No local machine known: fall back to a random node anywhere in the tree.
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes,
                        blocksize, maxNodesPerRack, results);
  }

  // Mark the local machine excluded; if it was not already excluded and
  // passes the target checks, use it directly.
  boolean wasAlreadyExcluded =
      excludedNodes.put(localMachine, localMachine) != null;
  if (!wasAlreadyExcluded
      && isGoodTarget(localMachine, blocksize, maxNodesPerRack, false, results)) {
    results.add(localMachine);
    return localMachine;
  }

  // Otherwise settle for some node on the writer's rack.
  return chooseLocalRack(localMachine, excludedNodes,
                         blocksize, maxNodesPerRack, results);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:27,代码来源:BlockPlacementPolicyDefault.java

示例11: SplitInfo

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Captures length, hosts, and racks for one task split.
 *
 * @param taskSplitMetaInfo split metadata (provides length and host locations)
 * @param topologyPath      network-topology path per host, or null/empty when
 *                          the file system reports no rack information
 */
SplitInfo(TaskSplitMetaInfo taskSplitMetaInfo, String[] topologyPath) {
  this.taskSplitMetaInfo = taskSplitMetaInfo;
  this.length = taskSplitMetaInfo.getInputDataLength();
  this.hosts = taskSplitMetaInfo.getLocations();

  // Guard the null case BEFORE asserting: the original asserted on
  // topologyPath.length first, which NPEs (with -ea) on a null argument
  // even though null is explicitly handled just below.
  if (topologyPath == null || topologyPath.length == 0) {
    // No rack information: place every host on the default rack.
    topologyPath = new String[hosts.length];
    for (int i = 0; i < hosts.length; i++) {
      topologyPath[i] =
          new NodeBase(hosts[i], NetworkTopology.DEFAULT_RACK).toString();
    }
  }
  assert (hosts.length == topologyPath.length);

  // The topology paths have the host name as the last component; strip it,
  // keeping only the rack location.
  this.racks = new String[hosts.length];
  for (int i = 0; i < racks.length; i++) {
    this.racks[i] = new NodeBase(topologyPath[i]).getNetworkLocation();
  }
}
 
开发者ID:yncxcw,项目名称:FlexMap,代码行数:25,代码来源:TaskDataProvision.java

示例12: chooseLocalNode

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Picks a target datanode, preferring the writer's own machine (when
 * {@code preferLocalNode} is set), then a node on the writer's rack, then
 * any node in the cluster when no local machine is known.
 *
 * @param localMachine   the writer's machine, or null when unknown
 * @param excludedNodes  nodes that must not be chosen; the local machine is
 *                       added here once considered
 * @param avoidStaleNodes whether stale nodes should be skipped as targets
 * @throws NotEnoughReplicasException if no suitable node can be found
 */
private DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine,
    HashMap<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeDescriptor> results, boolean avoidStaleNodes)
    throws NotEnoughReplicasException {
  // if no local machine, randomly choose one node
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes);
  }
  // Only consider the local machine when the policy prefers local placement.
  if (preferLocalNode) {
    // otherwise try local machine first
    Node oldNode = excludedNodes.put(localMachine, localMachine);
    if (oldNode == null) { // was not in the excluded list
      if (isGoodTarget(localMachine, blocksize, maxNodesPerRack, false,
          results, avoidStaleNodes)) {
        results.add(localMachine);
        return localMachine;
      }
    }
  }
  // try a node on local rack
  return chooseLocalRack(localMachine, excludedNodes, blocksize,
      maxNodesPerRack, results, avoidStaleNodes);
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:25,代码来源:BlockPlacementPolicyDefault.java

示例13: chooseLocalNode

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Picks a target datanode, preferring the writer's own machine, then a node
 * on the writer's rack, then any node in the cluster when no local machine
 * is known.
 */
private DatanodeDescriptor chooseLocalNode(
                                           DatanodeDescriptor localMachine,
                                           List<Node> excludedNodes,
                                           long blocksize,
                                           int maxNodesPerRack,
                                           List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  // Without a known local machine, any node in the cluster will do.
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes,
                        blocksize, maxNodesPerRack, results);
  }

  // Try the writer's machine first, marking it excluded so later picks
  // cannot choose it again.
  if (!excludedNodes.contains(localMachine)) {
    excludedNodes.add(localMachine);
    if (isGoodTarget(localMachine, blocksize, maxNodesPerRack, false, results)) {
      results.add(localMachine);
      return localMachine;
    }
  }

  // Local machine unusable or already excluded: fall back to its rack.
  return chooseLocalRack(localMachine, excludedNodes,
                         blocksize, maxNodesPerRack, results);
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:27,代码来源:ReplicationTargetChooser.java

示例14: chooseLocalNode

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Picks a target datanode, preferring the writer's own machine, then a node
 * on the writer's rack, then any node in the cluster when no local machine
 * is known.
 *
 * @param localMachine  the writer's machine, or null when unknown
 * @param excludedNodes nodes that must not be chosen; the local machine is
 *                      added here once considered
 * @throws NotEnoughReplicasException if no suitable node can be found
 */
private DatanodeDescriptor chooseLocalNode(
                                           DatanodeDescriptor localMachine,
                                           HashMap<Node, Node> excludedNodes,
                                           long blocksize,
                                           int maxNodesPerRack,
                                           List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  // if no local machine, randomly choose one node
  if (localMachine == null)
    return chooseRandom(NodeBase.ROOT, excludedNodes, 
                        blocksize, maxNodesPerRack, results);
    
  // otherwise try local machine first; putting it in the excluded map also
  // prevents later picks from choosing it again
  Node oldNode = excludedNodes.put(localMachine, localMachine);
  if (oldNode == null) { // was not in the excluded list
    if (isGoodTarget(localMachine, blocksize,
                     maxNodesPerRack, false, results)) {
      results.add(localMachine);
      return localMachine;
    }
  } 
    
  // try a node on local rack
  return chooseLocalRack(localMachine, excludedNodes, 
                         blocksize, maxNodesPerRack, results);
}
 
开发者ID:cumulusyebl,项目名称:cumulus,代码行数:27,代码来源:BlockPlacementPolicyDefault.java

示例15: resolveAndGetNode

import org.apache.hadoop.net.NodeBase; //导入依赖的package包/类
/**
 * Resolves a host name to its canonical Node in the cluster map, inserting
 * it (under its resolved network location) if not already present.
 */
private Node resolveAndGetNode(String name) {
  // Map the host name to its rack via the DNS-to-switch mapping.
  String networkLoc = NodeBase.normalize(
      dnsToSwitchMapping.resolve(Arrays.asList(new String[] {name})).get(0));

  // clusterMap is the single source of canonical node objects; synchronize
  // so two concurrent resolutions cannot insert duplicate copies of the
  // same node.
  Node node;
  synchronized (clusterMap) {
    while ((node = clusterMap.getNode(networkLoc + "/" + name)) == null) {
      clusterMap.add(new NodeBase(name, networkLoc));
    }
  }
  return node;
}
 
开发者ID:iVCE,项目名称:RDFS,代码行数:18,代码来源:TopologyCache.java


注:本文中的org.apache.hadoop.net.NodeBase类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。