Java NetworkTopology.DEFAULT_RACK Field Code Examples

This article collects typical usage examples of the NetworkTopology.DEFAULT_RACK field of the Java class org.apache.hadoop.net.NetworkTopology. If you have been wondering what NetworkTopology.DEFAULT_RACK is for, how to use it, or what real code that uses it looks like, the selected examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.net.NetworkTopology.


The sections below present 13 code examples of the NetworkTopology.DEFAULT_RACK field, sorted by popularity by default.
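
In Hadoop, NetworkTopology.DEFAULT_RACK is the constant rack path "/default-rack", used as the fallback network location whenever a host's rack cannot be resolved. Before the examples, here is a minimal, self-contained sketch of that fallback pattern. It is my own illustration rather than code from any of the projects listed below; it assumes hadoop-common is on the classpath, and the class name DefaultRackFallbackDemo, the host names, and the rack string "/rack-a" are made up for illustration.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class DefaultRackFallbackDemo {

  // Resolve a host to a rack-aware Node, falling back to
  // NetworkTopology.DEFAULT_RACK ("/default-rack") when no rack name is
  // available. resolvedRacks stands in for the list returned by a
  // DNSToSwitchMapping.resolve(...) call.
  static Node resolveWithFallback(String hostName, List<String> resolvedRacks) {
    String rackName;
    if (resolvedRacks == null || resolvedRacks.isEmpty()
        || resolvedRacks.get(0) == null) {
      // Resolution failed: place the host on the default rack.
      rackName = NetworkTopology.DEFAULT_RACK;
    } else {
      rackName = resolvedRacks.get(0);
    }
    return new NodeBase(hostName, rackName);
  }

  public static void main(String[] args) {
    // Hypothetical hosts: one with a resolved rack, one without.
    List<String> resolved = new ArrayList<>();
    resolved.add("/rack-a");

    Node onRackA = resolveWithFallback("host1.example.com", resolved);
    Node onDefault = resolveWithFallback("host2.example.com", null);

    System.out.println(onRackA.getNetworkLocation());   // /rack-a
    System.out.println(onDefault.getNetworkLocation()); // /default-rack
  }
}

This is essentially the shape shared by coreResolve and resolveNetworkLocation in Examples 3 through 6 below: consult a DNSToSwitchMapping, use the rack name it returns, and only substitute DEFAULT_RACK when the mapping returns nothing usable.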

Example 1: validateLabelsRequests

private void validateLabelsRequests(ResourceRequest resourceRequest,
    boolean isReduce) {
  switch (resourceRequest.getResourceName()) {
  case "map":
  case "reduce":
  case NetworkTopology.DEFAULT_RACK:
    Assert.assertNull(resourceRequest.getNodeLabelExpression());
    break;
  case "*":
    Assert.assertEquals(isReduce ? "ReduceNodes" : "MapNodes",
        resourceRequest.getNodeLabelExpression());
    break;
  default:
    Assert.fail("Invalid resource location "
        + resourceRequest.getResourceName());
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: TestRMContainerAllocator.java

Example 2: createReq

private ContainerRequestEvent
    createReq(JobId jobId, int taskAttemptId, int memory, String[] hosts,
        boolean earlierFailedAttempt, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  Resource containerNeed = Resource.newInstance(memory, 1);
  if (earlierFailedAttempt) {
    return ContainerRequestEvent
        .createContainerRequestEventForFailedContainer(attemptId,
            containerNeed);
  }
  return new ContainerRequestEvent(attemptId, containerNeed, hosts,
      new String[] { NetworkTopology.DEFAULT_RACK });
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestRMContainerAllocator.java

Example 3: coreResolve

private static Node coreResolve(String hostName) {
  List <String> tmpList = new ArrayList<String>(1);
  tmpList.add(hostName);
  List <String> rNameList = dnsToSwitchMapping.resolve(tmpList);
  String rName = null;
  if (rNameList == null || rNameList.get(0) == null) {
    rName = NetworkTopology.DEFAULT_RACK;
    if (LOG.isDebugEnabled()) {
      LOG.debug("Couldn't resolve " + hostName + ". Falling back to "
          + NetworkTopology.DEFAULT_RACK);
    }
  } else {
    rName = rNameList.get(0);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Resolved " + hostName + " to " + rName);
    }
  }
  return new NodeBase(hostName, rName);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source: RackResolver.java

Example 4: resolveNetworkLocation

private void resolveNetworkLocation(DatanodeDescriptor node) {
  List<String> names = new ArrayList<>(1);
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    names.add(node.getIpAddr());
  } else {
    names.add(node.getHostName());
  }
  
  // resolve its network location
  List<String> rName = dnsToSwitchMapping.resolve(names);
  String networkLocation;
  if (rName == null) {
    LOG.error("The resolve call returned null! Using " +
        NetworkTopology.DEFAULT_RACK + " for host " + names);
    networkLocation = NetworkTopology.DEFAULT_RACK;
  } else {
    networkLocation = rName.get(0);
  }
  node.setNetworkLocation(networkLocation);
}
 
Developer ID: hopshadoop, Project: hops, Lines: 20, Source: DatanodeManager.java

Example 5: resolveNetworkLocation

private String resolveNetworkLocation(DatanodeID node) {
  List<String> names = new ArrayList<String>(1);
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    names.add(node.getIpAddr());
  } else {
    names.add(node.getHostName());
  }
  
  // resolve its network location
  List<String> rName = dnsToSwitchMapping.resolve(names);
  String networkLocation;
  if (rName == null) {
    LOG.error("The resolve call returned null! Using " + 
        NetworkTopology.DEFAULT_RACK + " for host " + names);
    networkLocation = NetworkTopology.DEFAULT_RACK;
  } else {
    networkLocation = rName.get(0);
  }
  return networkLocation;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 20, Source: DatanodeManager.java

Example 6: coreResolve

private static Node coreResolve(String hostName) {
  List <String> tmpList = new ArrayList<String>(1);
  tmpList.add(hostName);
  List <String> rNameList = dnsToSwitchMapping.resolve(tmpList);
  String rName = null;
  if (rNameList == null || rNameList.get(0) == null) {
    rName = NetworkTopology.DEFAULT_RACK;
    LOG.info("Couldn't resolve " + hostName + ". Falling back to "
        + NetworkTopology.DEFAULT_RACK);
  } else {
    rName = rNameList.get(0);
    LOG.info("Resolved " + hostName + " to " + rName);
  }
  return new NodeBase(hostName, rName);
}
 
Developer ID: naver, Project: hadoop, Lines: 15, Source: RackResolver.java

Example 7: fakeRacks

private String[] fakeRacks(BlockLocation[] blkLocations, int index) 
throws IOException {
  String[] allHosts = blkLocations[index].getHosts();
  String[] allTopos = new String[allHosts.length];
  for (int i = 0; i < allHosts.length; i++) {
    allTopos[i] = NetworkTopology.DEFAULT_RACK + "/" + allHosts[i];
  }
  return allTopos;
}
 
Developer ID: naver, Project: hadoop, Lines: 9, Source: FileInputFormat.java

Example 8: resolveAndAddToTopology

public Node resolveAndAddToTopology(String name) {
  List <String> tmpList = new ArrayList<String>(1);
  tmpList.add(name);
  List <String> rNameList = dnsToSwitchMapping.resolve(tmpList);
  String rName = null;
  if (rNameList == null || rNameList.get(0) == null) {
    rName = NetworkTopology.DEFAULT_RACK;
    LOG.warn("Couldn't resolve " + name + ". Falling back to "
        + NetworkTopology.DEFAULT_RACK);
  } else {
    rName = rNameList.get(0);
  }
  String networkLoc = NodeBase.normalize(rName);
  return addHostToNodeMapping(name, networkLoc);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 15, Source: JobTracker.java

Example 9: MiniCoronaCluster

private MiniCoronaCluster(Builder builder) throws IOException {
  ContextFactory.resetFactory();
  setNoEmitMetricsContext();
  if (builder.racks != null && builder.hosts != null) {
    if (builder.racks.length != builder.hosts.length) {
      throw new IllegalArgumentException(
          "The number of hosts and racks must be the same");
    }
  }
  this.conf = builder.conf != null ? builder.conf : new JobConf();
  this.namenode = builder.namenode;
  this.ugi = builder.ugi;
  this.conf.set(CoronaConf.CM_ADDRESS, "localhost:0");
  this.conf.set(CoronaConf.CPU_TO_RESOURCE_PARTITIONING, TstUtils.std_cpu_to_resource_partitioning);
  this.clusterManagerPort = startClusterManager(this.conf);
  this.conf.set(CoronaConf.PROXY_JOB_TRACKER_ADDRESS, "localhost:0");
  // if there is any problem with dependencies, please implement
  // getFreePort() in MiniCoronaCluster
  this.conf.set(CoronaConf.PROXY_JOB_TRACKER_THRIFT_ADDRESS, "localhost:"
      + MiniDFSCluster.getFreePort());
  // Because we change to get system dir from ProxyJobTracker,
  // we need to tell the proxy job tracker which sysFs to use
  CoronaConf pjtConf = new CoronaConf(conf);
  FileSystem.setDefaultUri(pjtConf, namenode);
  pjt = ProxyJobTracker.startProxyTracker(pjtConf);
  this.proxyJobTrackerPort = pjt.getRpcPort();
  configureJobConf(conf, builder.namenode, clusterManagerPort,
    proxyJobTrackerPort, builder.ugi, null);
  this.rjtFailureInjector = builder.rjtFailureInjector;
  for (int i = 0; i < builder.numTaskTrackers; ++i) {
    String host = builder.hosts == null ?
        "host" + i + ".foo.com" : builder.hosts[i];
    String rack = builder.racks == null ?
        NetworkTopology.DEFAULT_RACK : builder.racks[i];
    startTaskTracker(host, rack, i, builder.numDir);
  }
  waitTaskTrackers();
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 38, Source: MiniCoronaCluster.java

Example 10: resolveNetworkLocation

void resolveNetworkLocation(DatanodeInfo node) {
  List<String> names = new ArrayList<String>(1);
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    // get the node's IP address
    names.add(node.getHost());
  } else if (dnsToSwitchMapping instanceof StaticMapping) {
    names.add(toHostPort(node));
  } else {
    // get the node's host name
    String hostName = node.getHostName();
    int colon = hostName.indexOf(":");
    hostName = (colon == -1) ? hostName : hostName.substring(0, colon);
    names.add(hostName);
  }

  // resolve its network location
  List<String> rName = dnsToSwitchMapping.resolve(names);
  String networkLocation;
  if (rName == null) {
    LOG.error("The resolve call returned null! Using " +
      NetworkTopology.DEFAULT_RACK + " for host " + names);
    networkLocation = NetworkTopology.DEFAULT_RACK;
  } else {
    networkLocation = rName.get(0);
  }
  node.setNetworkLocation(networkLocation);
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 27, Source: FSNamesystem.java

Example 11: fakeRacks

private String[] fakeRacks(final BlockLocation[] blkLocations, final int index)
    throws IOException {
  final String[] allHosts = blkLocations[index].getHosts();
  final String[] allTopos = new String[allHosts.length];
  for (int i = 0; i < allHosts.length; i++) {
    allTopos[i] = NetworkTopology.DEFAULT_RACK + "/" + allHosts[i];
  }
  return allTopos;
}
 
Developer ID: snuspl, Project: cruise, Lines: 9, Source: ExactNumSplitFileInputFormat.java

Example 12: resolve

public static String resolve(String hostName) {
  return NetworkTopology.DEFAULT_RACK;
}
 
Developer ID: naver, Project: hadoop, Lines: 3, Source: Application.java

Example 13: createReq

private ContainerRequest createReq(int priority, int memory, String[] hosts) {
  Resource capability = Resource.newInstance(memory, 1);
  Priority priorityOfContainer = Priority.newInstance(priority);
  return new ContainerRequest(capability, hosts,
      new String[] { NetworkTopology.DEFAULT_RACK }, priorityOfContainer);
}
 
Developer ID: naver, Project: hadoop, Lines: 6, Source: TestAMRMClientOnRMRestart.java


Note: The org.apache.hadoop.net.NetworkTopology.DEFAULT_RACK examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.