

Java Node Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.net.Node. If you are wondering what the Node class is for, how to use it, or where to find it in real code, the curated class code examples below may help.


The Node class belongs to the org.apache.hadoop.net package. The 15 code examples below demonstrate its use, sorted by popularity by default.

Example 1: testCaching

import org.apache.hadoop.net.Node; // import the dependent package/class
@Test
public void testCaching() {
  Configuration conf = new Configuration();
  conf.setClass(
    CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
    MyResolver.class, DNSToSwitchMapping.class);
  RackResolver.init(conf);
  try {
    InetAddress iaddr = InetAddress.getByName("host1");
    MyResolver.resolvedHost1 = iaddr.getHostAddress();
  } catch (UnknownHostException e) {
    // Ignore if not found
  }
  Node node = RackResolver.resolve("host1");
  Assert.assertEquals("/rack1", node.getNetworkLocation());
  node = RackResolver.resolve("host1");
  Assert.assertEquals("/rack1", node.getNetworkLocation());
  node = RackResolver.resolve(invalidHost);
  Assert.assertEquals(NetworkTopology.DEFAULT_RACK, node.getNetworkLocation());
}
 
Developer ID: naver; Project: hadoop; Lines of code: 21; Source: TestRackResolver.java
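
The MyResolver class configured in this test is not shown in the excerpt. The following is a hypothetical minimal stand-in, assuming it only needs to map "host1" (or its resolved address) to /rack1 and to return null for anything else, which makes RackResolver fall back to NetworkTopology.DEFAULT_RACK as asserted above:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.net.DNSToSwitchMapping;

// Hypothetical stand-in for the MyResolver referenced in testCaching.
public class MyResolver implements DNSToSwitchMapping {
  static String resolvedHost1 = "host1";

  @Override
  public List<String> resolve(List<String> names) {
    List<String> racks = new ArrayList<String>(names.size());
    for (String name : names) {
      // Known host -> /rack1; unknown host -> null, so RackResolver's
      // coreResolve (see Example 12) substitutes the default rack.
      racks.add("host1".equals(name) || name.equals(resolvedHost1)
          ? "/rack1" : null);
    }
    return racks;
  }

  @Override
  public void reloadCachedMappings() { } // nothing is cached here

  @Override
  public void reloadCachedMappings(List<String> names) { }
}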

Example 2: RMNodeImpl

import org.apache.hadoop.net.Node; // import the dependent package/class
public RMNodeImpl(NodeId nodeId, RMContext context, String hostName,
    int cmPort, int httpPort, Node node, Resource capability, String nodeManagerVersion) {
  this.nodeId = nodeId;
  this.context = context;
  this.hostName = hostName;
  this.commandPort = cmPort;
  this.httpPort = httpPort;
  this.totalCapability = capability; 
  this.nodeAddress = hostName + ":" + cmPort;
  this.httpAddress = hostName + ":" + httpPort;
  this.node = node;
  this.healthReport = "Healthy";
  this.lastHealthReportTime = System.currentTimeMillis();
  this.nodeManagerVersion = nodeManagerVersion;

  this.latestNodeHeartBeatResponse.setResponseId(0);

  ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  this.readLock = lock.readLock();
  this.writeLock = lock.writeLock();

  this.stateMachine = stateMachineFactory.make(this);
  
  this.nodeUpdateQueue = new ConcurrentLinkedQueue<UpdatedContainerInfo>();  
}
 
Developer ID: naver; Project: hadoop; Lines of code: 26; Source: RMNodeImpl.java

Example 3: getAdditionalBlock

import org.apache.hadoop.net.Node; // import the dependent package/class
/**
 * The client would like to obtain an additional block for the indicated
 * filename (which is being written to).  Return an array that consists
 * of the block, plus a set of machines.  The first entry in the list should
 * be where the client writes data.  Subsequent items in the list must
 * be provided in the connection to the first datanode.
 *
 * Make sure the previous blocks have been reported by datanodes and
 * are replicated.  Will return an empty two-element array if we want the
 * client to "try again later".
 */
LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
    ExtendedBlock previous, Set<Node> excludedNodes, 
    List<String> favoredNodes) throws IOException {
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  DatanodeStorageInfo targets[] = getNewBlockTargets(src, fileId,
      clientName, previous, excludedNodes, favoredNodes, onRetryBlock);
  if (targets == null) {
    assert onRetryBlock[0] != null : "Retry block is null";
    // This is a retry. Just return the last block.
    return onRetryBlock[0];
  }
  LocatedBlock newBlock = storeAllocatedBlock(
      src, fileId, clientName, previous, targets);
  return newBlock;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 27; Source: FSNamesystem.java
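
Note the retry contract: when getNewBlockTargets returns null, this call is a retransmission of an earlier addBlock RPC, and the previously allocated block stashed in onRetryBlock[0] is returned instead of allocating a fresh one, which keeps the operation idempotent for the client.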

Example 4: addBlock

import org.apache.hadoop.net.Node; // import the dependent package/class
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
    String[] favoredNodes)
    throws IOException {
  checkNNStartup();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
        + " fileId=" + fileId + " for " + clientName);
  }
  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<Node>(excludedNodes.length);
    for (Node node : excludedNodes) {
      excludedNodesSet.add(node);
    }
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);
  LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
      clientName, previous, excludedNodesSet, favoredNodesList);
  if (locatedBlock != null)
    metrics.incrAddBlockOps();
  return locatedBlock;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 26; Source: NameNodeRpcServer.java
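
The manual loop that copies excludedNodes into a HashSet here is tightened to Collections.addAll in the newer FSDirWriteFileOp variant shown in Example 13 below.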

Example 5: chooseTarget4NewBlock

import org.apache.hadoop.net.Node; // import the dependent package/class
/**
 * Choose target datanodes for creating a new block.
 * 
 * @throws IOException
 *           if the number of targets < minimum replication.
 * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
 *      Set, long, List, BlockStoragePolicy)
 */
public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src,
    final int numOfReplicas, final Node client,
    final Set<Node> excludedNodes,
    final long blocksize,
    final List<String> favoredNodes,
    final byte storagePolicyID) throws IOException {
  List<DatanodeDescriptor> favoredDatanodeDescriptors = 
      getDatanodeDescriptors(favoredNodes);
  final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
  final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
      numOfReplicas, client, excludedNodes, blocksize, 
      favoredDatanodeDescriptors, storagePolicy);
  if (targets.length < minReplication) {
    throw new IOException("File " + src + " could only be replicated to "
        + targets.length + " nodes instead of minReplication (="
        + minReplication + ").  There are "
        + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
        + " datanode(s) running and "
        + (excludedNodes == null? "no": excludedNodes.size())
        + " node(s) are excluded in this operation.");
  }
  return targets;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 32; Source: BlockManager.java
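
The "datanode(s) running" figure in the error message above comes from NetworkTopology.getNumOfLeaves(). A minimal sketch of that bookkeeping, using plain NodeBase leaves with assumed host and rack names:

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;

public class TopologyLeafCount {
  public static void main(String[] args) {
    NetworkTopology topology = new NetworkTopology();
    topology.add(new NodeBase("host1", "/rack1")); // hypothetical hosts/racks
    topology.add(new NodeBase("host2", "/rack1"));
    topology.add(new NodeBase("host3", "/rack2"));
    // Every added leaf counts toward getNumOfLeaves(); this prints 3.
    System.out.println(topology.getNumOfLeaves());
  }
}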

Example 6: chooseFromNextRack

import org.apache.hadoop.net.Node; // import the dependent package/class
private DatanodeStorageInfo chooseFromNextRack(Node next,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
  final String nextRack = next.getNetworkLocation();
  try {
    return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch(NotEnoughReplicasException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to choose from the next rack (location = " + nextRack
          + "), retry choosing ramdomly", e);
    }
    //otherwise randomly choose one from the network
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 22; Source: BlockPlacementPolicyDefault.java

Example 7: addIfIsGoodTarget

import org.apache.hadoop.net.Node; // import the dependent package/class
/**
 * If the given storage is a good target, add it to the result list and
 * update the set of excluded nodes.
 * @return -1 if the given storage is not a good target;
 *         otherwise, return the number of nodes added to excludedNodes set.
 */
int addIfIsGoodTarget(DatanodeStorageInfo storage,
    Set<Node> excludedNodes,
    long blockSize,
    int maxNodesPerRack,
    boolean considerLoad,
    List<DatanodeStorageInfo> results,                           
    boolean avoidStaleNodes,
    StorageType storageType) {
  if (isGoodTarget(storage, blockSize, maxNodesPerRack, considerLoad,
      results, avoidStaleNodes, storageType)) {
    results.add(storage);
    // add node and related nodes to excludedNode
    return addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes);
  } else { 
    return -1;
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 24; Source: BlockPlacementPolicyDefault.java

Example 8: chooseRemoteRack

import org.apache.hadoop.net.Node; // import the dependent package/class
/**
 * {@inheritDoc}
 */
@Override
protected void chooseRemoteRack(int numOfReplicas,
    DatanodeDescriptor localMachine, Set<Node> excludedNodes,
    long blocksize, int maxReplicasPerRack, List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes)
    throws NotEnoughReplicasException {
  int oldNumOfReplicas = results.size();

  final String rackLocation = NetworkTopology.getFirstHalf(
      localMachine.getNetworkLocation());
  try {
    // randomly choose from remote racks
    chooseRandom(numOfReplicas, "~" + rackLocation, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e) {
    // fall back to the local rack
    chooseRandom(numOfReplicas - (results.size() - oldNumOfReplicas),
        rackLocation, excludedNodes, blocksize,
        maxReplicasPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
Developer ID: naver; Project: hadoop; Lines of code: 25; Source: BlockPlacementPolicyWithNodeGroup.java
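
The "~" prefix passed to chooseRandom inverts the scope: candidates are drawn from every leaf except those under the given location. A small sketch of that semantics, with hypothetical hosts and racks:

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class RemoteRackChoice {
  public static void main(String[] args) {
    NetworkTopology topology = new NetworkTopology();
    topology.add(new NodeBase("host1", "/rack1"));
    topology.add(new NodeBase("host2", "/rack2"));
    topology.add(new NodeBase("host3", "/rack3"));
    // "~/rack1" chooses uniformly from all leaves NOT under /rack1,
    // which is how chooseRemoteRack above avoids the local rack.
    Node remote = topology.chooseRandom("~" + "/rack1");
    System.out.println(remote.getName()); // host2 or host3, never host1
  }
}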

Example 9: addToExcludedNodes

import org.apache.hadoop.net.Node; // import the dependent package/class
/**
 * Find the other nodes in the same node group as <i>localMachine</i> and add
 * them to <i>excludedNodes</i>, since replicas should not be duplicated on
 * nodes within the same node group.
 * @return number of new excluded nodes
 */
@Override
protected int addToExcludedNodes(DatanodeDescriptor chosenNode,
    Set<Node> excludedNodes) {
  int countOfExcludedNodes = 0;
  String nodeGroupScope = chosenNode.getNetworkLocation();
  List<Node> leafNodes = clusterMap.getLeaves(nodeGroupScope);
  for (Node leafNode : leafNodes) {
    if (excludedNodes.add(leafNode)) {
      // not an existing node in excludedNodes
      countOfExcludedNodes++;
    }
  }
  
  countOfExcludedNodes += addDependentNodesToExcludedNodes(
      chosenNode, excludedNodes);
  return countOfExcludedNodes;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 24; Source: BlockPlacementPolicyWithNodeGroup.java

Example 10: addDependentNodesToExcludedNodes

import org.apache.hadoop.net.Node; // import the dependent package/class
/**
 * Add all nodes from a dependent nodes list to excludedNodes.
 * @return number of new excluded nodes
 */
private int addDependentNodesToExcludedNodes(DatanodeDescriptor chosenNode,
    Set<Node> excludedNodes) {
  if (this.host2datanodeMap == null) {
    return 0;
  }
  int countOfExcludedNodes = 0;
  for(String hostname : chosenNode.getDependentHostNames()) {
    DatanodeDescriptor node =
        this.host2datanodeMap.getDataNodeByHostName(hostname);
    if(node!=null) {
      if (excludedNodes.add(node)) {
        countOfExcludedNodes++;
      }
    } else {
      LOG.warn("Not able to find datanode " + hostname
          + " which has dependency with datanode "
          + chosenNode.getHostName());
    }
  }
  
  return countOfExcludedNodes;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 27; Source: BlockPlacementPolicyWithNodeGroup.java
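
Both counting methods above rely on Set.add returning false for duplicates, so a node that appears both in the node-group scan and in a dependency list is only counted once. A tiny sketch of the pattern (hypothetical node; in production the set deduplicates by the datanodes' equals/hashCode):

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class ExcludedNodeCounting {
  public static void main(String[] args) {
    Set<Node> excludedNodes = new HashSet<Node>();
    Node node = new NodeBase("host1", "/rack1/nodegroup1");
    int newlyExcluded = 0;
    if (excludedNodes.add(node)) newlyExcluded++; // first add: counted
    if (excludedNodes.add(node)) newlyExcluded++; // duplicate: add() returns false
    System.out.println(newlyExcluded); // 1
  }
}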

Example 11: chooseTarget

import org.apache.hadoop.net.Node; // import the dependent package/class
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {}
  return results;
}
 
Developer ID: naver; Project: hadoop; Lines of code: 18; Source: TestDeleteRace.java
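
The unconditional three-second sleep appears deliberate: this test-only policy stretches the target-selection window, presumably so TestDeleteRace can delete the file while chooseTarget is still in flight.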

Example 12: coreResolve

import org.apache.hadoop.net.Node; // import the dependent package/class
private static Node coreResolve(String hostName) {
  List <String> tmpList = new ArrayList<String>(1);
  tmpList.add(hostName);
  List <String> rNameList = dnsToSwitchMapping.resolve(tmpList);
  String rName = null;
  if (rNameList == null || rNameList.get(0) == null) {
    rName = NetworkTopology.DEFAULT_RACK;
    if (LOG.isDebugEnabled()) {
      LOG.debug("Couldn't resolve " + hostName + ". Falling back to "
          + NetworkTopology.DEFAULT_RACK);
    }
  } else {
    rName = rNameList.get(0);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Resolved " + hostName + " to " + rName);
    }
  }
  return new NodeBase(hostName, rName);
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines of code: 20; Source: RackResolver.java
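
The NodeBase returned here pairs a host name with its rack path; getNetworkLocation() is what callers such as testCaching in Example 1 assert on. A minimal sketch of the two possible outcomes, with assumed host names:

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class CoreResolveOutcomes {
  public static void main(String[] args) {
    // The mapping succeeded: the host is paired with its resolved rack.
    Node resolved = new NodeBase("host1", "/rack1");
    System.out.println(resolved.getNetworkLocation()); // /rack1

    // The mapping returned null: coreResolve falls back to the default rack.
    Node fallback = new NodeBase("unknown-host", NetworkTopology.DEFAULT_RACK);
    System.out.println(fallback.getNetworkLocation()); // /default-rack
  }
}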

Example 13: chooseTargetForNewBlock

import org.apache.hadoop.net.Node; // import the dependent package/class
static DatanodeStorageInfo[] chooseTargetForNewBlock(
    BlockManager bm, String src, DatanodeInfo[] excludedNodes, String[]
    favoredNodes, ValidateAddBlockResult r) throws IOException {
  Node clientNode = bm.getDatanodeManager()
      .getDatanodeByHost(r.clientMachine);
  if (clientNode == null) {
    clientNode = getClientNode(bm, r.clientMachine);
  }

  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<>(excludedNodes.length);
    Collections.addAll(excludedNodesSet, excludedNodes);
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);

  // choose targets for the new block to be allocated.
  return bm.chooseTarget4NewBlock(src, r.numTargets, clientNode,
                                  excludedNodesSet, r.blockSize,
                                  favoredNodesList, r.storagePolicyID,
                                  r.isStriped);
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines of code: 24; Source: FSDirWriteFileOp.java

Example 14: chooseFavouredNodes

import org.apache.hadoop.net.Node; // import the dependent package/class
protected void chooseFavouredNodes(String src, int numOfReplicas,
    List<DatanodeDescriptor> favoredNodes,
    Set<Node> favoriteAndExcludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes)
    throws NotEnoughReplicasException {
  for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas;
      i++) {
    DatanodeDescriptor favoredNode = favoredNodes.get(i);
    // Choose a single node which is local to favoredNode.
    // 'results' is updated within chooseLocalNode
    final DatanodeStorageInfo target =
        chooseLocalStorage(favoredNode, favoriteAndExcludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes, false);
    if (target == null) {
      LOG.warn("Could not find a target for file " + src
          + " with favored node " + favoredNode);
      continue;
    }
    favoriteAndExcludedNodes.add(target.getDatanodeDescriptor());
  }
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines of code: 23; Source: BlockPlacementPolicyDefault.java
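
Favored nodes are best-effort here: a favored node that yields no local storage just logs a warning and is skipped, and each chosen target is added to favoriteAndExcludedNodes so that later iterations and the regular placement path cannot pick it twice.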

Example 15: chooseLocalStorage

import org.apache.hadoop.net.Node; // import the dependent package/class
/**
 * Choose <i>localMachine</i> as the target.
 * If <i>localMachine</i> is not available,
 * choose a node on the same rack.
 * @return the chosen storage
 */
protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes, boolean fallbackToLocalRack)
    throws NotEnoughReplicasException {
  DatanodeStorageInfo localStorage = chooseLocalStorage(localMachine,
      excludedNodes, blocksize, maxNodesPerRack, results,
      avoidStaleNodes, storageTypes);
  if (localStorage != null) {
    return localStorage;
  }

  if (!fallbackToLocalRack) {
    return null;
  }
  // try a node on local rack
  return chooseLocalRack(localMachine, excludedNodes, blocksize,
      maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
 
Developer ID: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines of code: 26; Source: BlockPlacementPolicyDefault.java
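
The fallbackToLocalRack flag makes this overload suitable for favored-node placement: chooseFavouredNodes in Example 14 passes false, so an unavailable favored node is simply skipped rather than the search silently widening to its whole rack.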


Note: The org.apache.hadoop.net.Node class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult the corresponding project's License before using or redistributing the code, and do not reproduce this article without permission.