

Java NetworkTopology.getInstance Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.net.NetworkTopology.getInstance. If you are wondering what NetworkTopology.getInstance does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.net.NetworkTopology.


Eight code examples of NetworkTopology.getInstance are shown below, sorted by popularity by default.
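
Before the examples, here is a minimal, self-contained sketch of what NetworkTopology.getInstance gives you. It relies only on the public API: getInstance(conf) reflectively constructs the topology class named by the net.topology.impl configuration key (plain NetworkTopology when unset), and the returned instance starts empty. The host names and racks below are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class NetworkTopologyGetInstanceDemo {
  public static void main(String[] args) {
    // getInstance is a factory, not a singleton: each call builds a new,
    // empty topology from the given Configuration.
    Configuration conf = new Configuration();
    NetworkTopology cluster = NetworkTopology.getInstance(conf);

    // Hypothetical nodes; the second constructor argument is the rack.
    Node h1 = new NodeBase("h1", "/rack1");
    Node h2 = new NodeBase("h2", "/rack1");
    Node h3 = new NodeBase("h3", "/rack2");
    cluster.add(h1);
    cluster.add(h2);
    cluster.add(h3);

    System.out.println(cluster.getNumOfRacks());      // 2
    System.out.println(cluster.isOnSameRack(h1, h2)); // true
    System.out.println(cluster.getDistance(h1, h3));  // 4: node->rack->root->rack->node
  }
}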

Example 1: Dispatcher

import org.apache.hadoop.net.NetworkTopology; // the import required by this method
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);

  this.cluster = NetworkTopology.getInstance(conf);

  this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
  this.dispatchExecutor = dispatcherThreads == 0? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
}
 
Author: naver, Project: hadoop, Lines: 20, Source: Dispatcher.java
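
Worth noting here: since getInstance returns a brand-new, empty topology, the Dispatcher holds a private cluster view rather than a process-wide singleton, and must populate it later from the NameNode's datanode report.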

Example 2: reset

import org.apache.hadoop.net.NetworkTopology; // the import required by this method
/** Reset all fields in order to prepare for the next iteration */
void reset(Configuration conf) {
  cluster = NetworkTopology.getInstance(conf);
  storageGroupMap.clear();
  sources.clear();
  targets.clear();
  globalBlocks.removeAllButRetain(movedBlocks);
  movedBlocks.cleanup();
}
 
Author: naver, Project: hadoop, Lines: 10, Source: Dispatcher.java
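
Because reset() re-creates the cluster through getInstance instead of clearing the old one, each iteration starts from an empty topology built from the current Configuration. getInstance is also where the topology implementation itself is pluggable; here is a small sketch of that hook, using the node-group-aware implementation that ships with Hadoop (the class and key names are real Hadoop identifiers, but treat the snippet as illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;

public class TopologyImplSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // net.topology.impl names the class that getInstance will construct.
    conf.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
        NetworkTopologyWithNodeGroup.class, NetworkTopology.class);
    NetworkTopology cluster = NetworkTopology.getInstance(conf);
    System.out.println(cluster.getClass().getSimpleName());
    // prints: NetworkTopologyWithNodeGroup
  }
}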

Example 3: Dispatcher

import org.apache.hadoop.net.NetworkTopology; // the import required by this method
Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode,
    long getBlocksSize, long getBlocksMinBlockSize, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);

  this.cluster = NetworkTopology.getInstance(conf);

  this.dispatchExecutor = dispatcherThreads == 0? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.moverThreadAllocator = new Allocator(moverThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

  this.getBlocksSize = getBlocksSize;
  this.getBlocksMinBlockSize = getBlocksMinBlockSize;

  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);
  this.connectToDnViaHostname = conf.getBoolean(
      HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME,
      HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
  placementPolicies = new BlockPlacementPolicies(conf, null, cluster, null);
}
 
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source: Dispatcher.java

Example 4: reset

import org.apache.hadoop.net.NetworkTopology; // the import required by this method
/** Reset all fields in order to prepare for the next iteration */
void reset(Configuration conf) {
  cluster = NetworkTopology.getInstance(conf);
  storageGroupMap.clear();
  sources.clear();

  moverThreadAllocator.reset();
  for(StorageGroup t : targets) {
    t.getDDatanode().shutdownMoveExecutor();
  }
  targets.clear();
  globalBlocks.removeAllButRetain(movedBlocks);
  movedBlocks.cleanup();
}
 
Author: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 15, Source: Dispatcher.java
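
Examples 3 and 4 are later revisions of Examples 1 and 2: the Dispatcher swaps the single fixed mover pool for a per-datanode Allocator (which is why this reset() must shut down each target's move executor before clearing state) and adds getBlocks size limits and BlockPlacementPolicies. The NetworkTopology.getInstance call is unchanged.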

Example 5: Balancer

import org.apache.hadoop.net.NetworkTopology; // the import required by this method
/**
 * Construct a balancer.
 * Initializes the balancer: sets the threshold and builds the
 * communication proxies to the namenode (as a client and as a
 * secondary namenode), with retry proxies for when the connection
 * fails.
 */
Balancer(NameNodeConnector theblockpool, Parameters p, Configuration conf) {
  this.threshold = p.threshold;
  this.policy = p.policy;
  this.nnc = theblockpool;
  cluster = NetworkTopology.getInstance(conf);

  this.moverExecutor = Executors.newFixedThreadPool(
          conf.getInt(DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY,
                      DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_DEFAULT));
  this.dispatcherExecutor = Executors.newFixedThreadPool(
          conf.getInt(DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
                      DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT));
}
 
Author: ict-carch, Project: hadoop-plus, Lines: 21, Source: Balancer.java
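
This is an older Balancer, from before the dispatch logic was factored out into the Dispatcher class of Examples 1-4; the pattern is the same, with a fresh topology per run and thread-pool sizes read from dfs.balancer.moverThreads and dfs.balancer.dispatcherThreads.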

Example 6: resetData

import org.apache.hadoop.net.NetworkTopology; // the import required by this method
private void resetData(Configuration conf) {
  this.cluster = NetworkTopology.getInstance(conf);
  this.overUtilizedDatanodes.clear();
  this.aboveAvgUtilizedDatanodes.clear();
  this.belowAvgUtilizedDatanodes.clear();
  this.underUtilizedDatanodes.clear();
  this.datanodes.clear();
  this.sources.clear();
  this.targets.clear();  
  this.policy.reset();
  cleanGlobalBlockList();
  this.movedBlocks.cleanup();
}
 
Author: ict-carch, Project: hadoop-plus, Lines: 14, Source: Balancer.java

Example 7: DatanodeManager

import org.apache.hadoop.net.NetworkTopology; // the import required by this method
DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
    final Configuration conf) throws IOException {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  
  this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);

  networktopology = NetworkTopology.getInstance(conf);

  this.defaultXferPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
  this.defaultInfoPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
  this.defaultIpcPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
  try {
    this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
      conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
  } catch (IOException e) {
    LOG.error("error reading hosts files: ", e);
  }

  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
          ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);
  
  // If the dns to switch mapping supports cache, resolve network
  // locations of those hosts in the include list and store the mapping
  // in the cache; so future calls to resolve will be fast.
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    final ArrayList<String> locations = new ArrayList<String>();
    for (Entry entry : hostFileManager.getIncludes()) {
      if (!entry.getIpAddress().isEmpty()) {
        locations.add(entry.getIpAddress());
      }
    }
    dnsToSwitchMapping.resolve(locations);
  }

  final long heartbeatIntervalSeconds = conf.getLong(
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
  final int heartbeatRecheckInterval = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
      + 10 * 1000 * heartbeatIntervalSeconds;
  final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds),
      DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
  this.blockInvalidateLimit = conf.getInt(
      DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
  LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
      + "=" + this.blockInvalidateLimit);

  this.avoidStaleDataNodesForRead = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
  this.avoidStaleDataNodesForWrite = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
  this.staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval);
  this.ratioUseStaleDataNodesForWrite = conf.getFloat(
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
  Preconditions.checkArgument(
      (ratioUseStaleDataNodesForWrite > 0 && 
          ratioUseStaleDataNodesForWrite <= 1.0f),
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
      " = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
      "It should be a positive non-zero float value, not greater than 1.0f.");
}
 
Author: ict-carch, Project: hadoop-plus, Lines: 75, Source: DatanodeManager.java
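
The DatanodeManager is where the topology created by getInstance actually gets populated: when a datanode registers, its address is resolved to a rack via the pluggable dnsToSwitchMapping and the node is added to networktopology. Below is a minimal sketch of that resolve-then-add pattern, using the same pluggable-mapping construction as the snippet above; with no topology script configured, ScriptBasedMapping maps every host to /default-rack.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.util.ReflectionUtils;

public class TopologyRegistrationSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    NetworkTopology topology = NetworkTopology.getInstance(conf);

    // Same construction pattern as DatanodeManager: the mapping class is
    // pluggable via net.topology.node.switch.mapping.impl.
    DNSToSwitchMapping mapping = ReflectionUtils.newInstance(
        conf.getClass(
            CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
            ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);

    // Resolve a host to its rack, then place the node in the topology.
    List<String> racks = mapping.resolve(Arrays.asList("10.0.0.1"));
    Node node = new NodeBase("10.0.0.1", racks.get(0));
    topology.add(node);

    System.out.println(NodeBase.getPath(node)); // /default-rack/10.0.0.1
  }
}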

Example 8: DatanodeManager

import org.apache.hadoop.net.NetworkTopology; // the import required by this method
DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
    final Configuration conf) throws IOException {
  this.namesystem = namesystem;
  this.blockManager = blockManager;

  this.networktopology = NetworkTopology.getInstance(conf);

  this.heartbeatManager =
      new HeartbeatManager(namesystem, blockManager, conf);

  this.hostsReader =
      new HostsFileReader(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
          conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));

  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
          ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);
  
  // If the dns to switch mapping supports cache, resolve network
  // locations of those hosts in the include list and store the mapping
  // in the cache; so future calls to resolve will be fast.
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    dnsToSwitchMapping.resolve(new ArrayList<>(hostsReader.getHosts()));
  }
  
  final long heartbeatIntervalSeconds =
      conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
          DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
  final int heartbeatRecheckInterval =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
          DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
  this.heartbeatExpireInterval =
      2 * heartbeatRecheckInterval + 10 * 1000 * heartbeatIntervalSeconds;
  final int blockInvalidateLimit =
      Math.max(20 * (int) (heartbeatIntervalSeconds),
          DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
  this.blockInvalidateLimit =
      conf.getInt(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY,
          blockInvalidateLimit);
  LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY + "=" +
      this.blockInvalidateLimit);

  this.avoidStaleDataNodesForRead = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
  this.avoidStaleDataNodesForWrite = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
  this.staleInterval =
      getStaleIntervalFromConf(conf, heartbeatExpireInterval);
  this.ratioUseStaleDataNodesForWrite = conf.getFloat(
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
  Preconditions.checkArgument((ratioUseStaleDataNodesForWrite > 0 &&
          ratioUseStaleDataNodesForWrite <= 1.0f),
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
          " = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
          "It should be a positive non-zero float value, not greater than 1.0f.");
  
  this.storageIdMap = new StorageIdMap();
}
 
Author: hopshadoop, Project: hops, Lines: 62, Source: DatanodeManager.java
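
Example 8, from the hops fork, differs from Example 7 mainly in its bookkeeping, using a plain HostsFileReader in place of the HostFileManager and adding a StorageIdMap; the topology setup via NetworkTopology.getInstance(conf) is identical.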


Note: the org.apache.hadoop.net.NetworkTopology.getInstance method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.