当前位置: 首页>>代码示例>>Java>>正文


Java ScriptBasedMapping类代码示例

本文整理汇总了Java中org.apache.hadoop.net.ScriptBasedMapping的典型用法代码示例。如果您正苦于以下问题:Java ScriptBasedMapping类的具体用法?Java ScriptBasedMapping怎么用?Java ScriptBasedMapping使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


ScriptBasedMapping类属于org.apache.hadoop.net包,在下文中一共展示了ScriptBasedMapping类的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: init

import org.apache.hadoop.net.ScriptBasedMapping; //导入依赖的package包/类
/**
 * One-time initialization of the rack resolver from the given configuration.
 * Instantiates the configured {@code DNSToSwitchMapping} implementation
 * (script-based by default) and wraps it in a caching decorator unless it
 * already caches. Subsequent calls are no-ops.
 *
 * @param conf configuration naming the mapping implementation class
 */
public synchronized static void init(Configuration conf) {
  if (initCalled) {
    return;
  }
  initCalled = true;

  Class<? extends DNSToSwitchMapping> mappingClass = conf.getClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      ScriptBasedMapping.class,
      DNSToSwitchMapping.class);
  try {
    final DNSToSwitchMapping rawMapping =
        ReflectionUtils.newInstance(mappingClass, conf);
    // Wrap the configured class with the cached implementation to save on
    // repetitive lookups — unless it already caches, to avoid double caching.
    if (rawMapping instanceof CachedDNSToSwitchMapping) {
      dnsToSwitchMapping = rawMapping;
    } else {
      dnsToSwitchMapping = new CachedDNSToSwitchMapping(rawMapping);
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:25,代码来源:RackResolver.java

示例2: testScriptName

import org.apache.hadoop.net.ScriptBasedMapping; //导入依赖的package包/类
/**
 * Verifies that RackResolver, when configured with ScriptBasedMapping and a
 * topology script file name, reports that script name via the mapping's
 * {@code toString()}.
 */
@Test
public void testScriptName() {
  Configuration conf = new Configuration();
  conf
      .setClass(
          CommonConfigurationKeysPublic.
              NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
          ScriptBasedMapping.class, DNSToSwitchMapping.class);
  conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
      "testScript");
  RackResolver.init(conf);
  // assertEquals takes (expected, actual); the original had them swapped,
  // which inverts the "expected X but was Y" failure message.
  Assert.assertEquals("script-based mapping with script testScript",
      RackResolver.getDnsToSwitchMapping().toString());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:TestRackResolverScriptBasedMapping.java

示例3: activate

import org.apache.hadoop.net.ScriptBasedMapping; //导入依赖的package包/类
/**
 * Activate FSNamesystem daemons: starts the heartbeat, lease and replication
 * monitors plus the decommission manager, instantiates the DNS-to-switch
 * (rack) mapping, pre-warms its cache from the hosts include list, and
 * registers the MXBean.
 *
 * @param conf configuration supplying decommission intervals and the
 *             topology mapping implementation class
 * @throws IOException declared for daemon/mapping setup failures
 */
void activate(Configuration conf) throws IOException {
  setBlockTotal();
  blockManager.activate();
  // Periodic monitors, each wrapped in a Daemon thread and started below.
  this.hbthread = new Daemon(new HeartbeatMonitor());
  this.lmthread = new Daemon(leaseManager.new Monitor());
  this.replthread = new Daemon(new ReplicationMonitor());
  hbthread.start();
  lmthread.start();
  replthread.start();

  // Decommission monitor polls every N seconds, handling M nodes per round.
  this.dnthread = new Daemon(new DecommissionManager(this).new Monitor(
      conf.getInt("dfs.namenode.decommission.interval", 30),
      conf.getInt("dfs.namenode.decommission.nodes.per.interval", 5)));
  dnthread.start();

  // Instantiate the configured rack mapping (script-based by default).
  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                    ScriptBasedMapping.class,
          DNSToSwitchMapping.class), conf);
  
  /* If the dns to switch mapping supports caching, resolve network 
   * locations of those hosts in the include list, 
   * and store the mapping in the cache; so future calls to resolve
   * will be fast.
   */
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
  }
  registerMXBean();
}
 
开发者ID:cumulusyebl,项目名称:cumulus,代码行数:34,代码来源:FSNamesystem.java

示例4: TopologyCache

import org.apache.hadoop.net.ScriptBasedMapping; //导入依赖的package包/类
public TopologyCache(Configuration conf) {
  super(conf);

  dnsToSwitchMapping = ReflectionUtils.newInstance
    (conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
                   DNSToSwitchMapping.class), conf);
  LOG.info("DnsToSwitchMapping class = " + dnsToSwitchMapping.getClass().getName());
}
 
开发者ID:iVCE,项目名称:RDFS,代码行数:9,代码来源:TopologyCache.java

示例5: initialize

import org.apache.hadoop.net.ScriptBasedMapping; //导入依赖的package包/类
/**
 * Initialize the filesystem store -this creates the REST client binding.
 *
 * @param fsURI         URI of the filesystem, which is used to map to the filesystem-specific
 *                      options in the configuration file
 * @param configuration configuration
 * @throws IOException on any failure.
 */
public void initialize(URI fsURI, Configuration configuration) throws IOException {
  this.uri = fsURI;
  // Resolve the rack-mapping implementation named in the configuration,
  // falling back to the script-based one.
  Class<? extends DNSToSwitchMapping> mappingImpl = configuration.getClass(
      "topology.node.switch.mapping.impl",
      ScriptBasedMapping.class,
      DNSToSwitchMapping.class);
  dnsToSwitchMapping = ReflectionUtils.newInstance(mappingImpl, configuration);

  this.swiftRestClient = SwiftRestClient.getInstance(fsURI, configuration);
}
 
开发者ID:openstack,项目名称:sahara-extra,代码行数:17,代码来源:SwiftNativeFileSystemStore.java

示例6: RackManager

import org.apache.hadoop.net.ScriptBasedMapping; //导入依赖的package包/类
/**
 * Creates a rack manager whose switch mapping is the class configured under
 * {@code hbase.util.ip.to.rack.determiner} (script-based by default),
 * instantiated through its {@code (Configuration)} constructor.
 *
 * @param conf configuration naming the mapping implementation class
 */
public RackManager(Configuration conf) {
  String implName = conf.getClass("hbase.util.ip.to.rack.determiner",
      ScriptBasedMapping.class, DNSToSwitchMapping.class).getName();
  switchMapping = ReflectionUtils.instantiateWithCustomCtor(
      implName,
      new Class<?>[] { Configuration.class },
      new Object[] { conf });
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:7,代码来源:RackManager.java

示例7: DatanodeManager

import org.apache.hadoop.net.ScriptBasedMapping; //导入依赖的package包/类
/**
 * Constructs the datanode manager: wires up the heartbeat manager and network
 * topology, reads the default datanode ports, loads the host include/exclude
 * lists, sets up rack resolution (pre-warming its cache from the include
 * list), and derives the heartbeat/staleness/invalidation tuning from
 * configuration.
 *
 * @param blockManager owning block manager
 * @param namesystem   owning namesystem
 * @param conf         configuration to read ports, host files and intervals from
 * @throws IOException declared for configuration access; host-file refresh
 *                     errors are caught and logged, not rethrown
 */
DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
    final Configuration conf) throws IOException {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  
  this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);

  networktopology = NetworkTopology.getInstance(conf);

  this.defaultXferPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
  // NOTE(review): the HTTP address key is paired with the HTTPS default
  // here — looks like a copy/paste slip; confirm against upstream before
  // changing, since the value only matters when the key is unset.
  this.defaultInfoPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
  this.defaultIpcPort = NetUtils.createSocketAddr(
        conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
  try {
    // Host-file problems are logged rather than fatal so the namenode can
    // still come up without the include/exclude files.
    this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
      conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
  } catch (IOException e) {
    LOG.error("error reading hosts files: ", e);
  }

  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
          ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);
  
  // If the dns to switch mapping supports cache, resolve network
  // locations of those hosts in the include list and store the mapping
  // in the cache; so future calls to resolve will be fast.
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    final ArrayList<String> locations = new ArrayList<String>();
    for (Entry entry : hostFileManager.getIncludes()) {
      if (!entry.getIpAddress().isEmpty()) {
        locations.add(entry.getIpAddress());
      }
    }
    dnsToSwitchMapping.resolve(locations);
  }  // removed a stray empty statement (';') that followed this block

  final long heartbeatIntervalSeconds = conf.getLong(
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
  final int heartbeatRecheckInterval = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
  // A datanode is considered dead after two recheck intervals plus ten
  // missed heartbeats.
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
      + 10 * 1000 * heartbeatIntervalSeconds;
  final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds),
      DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
  this.blockInvalidateLimit = conf.getInt(
      DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
  LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
      + "=" + this.blockInvalidateLimit);

  this.avoidStaleDataNodesForRead = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
  this.avoidStaleDataNodesForWrite = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
  this.staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval);
  this.ratioUseStaleDataNodesForWrite = conf.getFloat(
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
  // The write ratio must lie in (0, 1]; fail fast on misconfiguration.
  Preconditions.checkArgument(
      (ratioUseStaleDataNodesForWrite > 0 && 
          ratioUseStaleDataNodesForWrite <= 1.0f),
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
      " = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
      "It should be a positive non-zero float value, not greater than 1.0f.");
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:75,代码来源:DatanodeManager.java

示例8: DatanodeManager

import org.apache.hadoop.net.ScriptBasedMapping; //导入依赖的package包/类
/**
 * Constructs the datanode manager: wires up the network topology and
 * heartbeat manager, loads the host include/exclude lists, sets up rack
 * resolution (pre-warming its cache from the hosts file), derives
 * heartbeat/staleness/invalidation tuning from configuration, and creates
 * the storage-id map.
 *
 * @param blockManager owning block manager
 * @param namesystem   owning namesystem
 * @param conf         configuration to read host files and intervals from
 * @throws IOException if the host files cannot be read
 */
DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
    final Configuration conf) throws IOException {
  this.namesystem = namesystem;
  this.blockManager = blockManager;

  this.networktopology = NetworkTopology.getInstance(conf);

  this.heartbeatManager =
      new HeartbeatManager(namesystem, blockManager, conf);

  // Include/exclude host lists; empty defaults mean "no restriction".
  this.hostsReader =
      new HostsFileReader(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
          conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));

  // Instantiate the configured rack mapping (script-based by default).
  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
          ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);
  
  // If the dns to switch mapping supports cache, resolve network
  // locations of those hosts in the include list and store the mapping
  // in the cache; so future calls to resolve will be fast.
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    dnsToSwitchMapping.resolve(new ArrayList<>(hostsReader.getHosts()));
  }
  
  final long heartbeatIntervalSeconds =
      conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
          DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
  final int heartbeatRecheckInterval =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
          DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
  // A datanode is considered dead after two recheck intervals plus ten
  // missed heartbeats.
  this.heartbeatExpireInterval =
      2 * heartbeatRecheckInterval + 10 * 1000 * heartbeatIntervalSeconds;
  final int blockInvalidateLimit =
      Math.max(20 * (int) (heartbeatIntervalSeconds),
          DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
  this.blockInvalidateLimit =
      conf.getInt(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY,
          blockInvalidateLimit);
  LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY + "=" +
      this.blockInvalidateLimit);

  this.avoidStaleDataNodesForRead = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
  this.avoidStaleDataNodesForWrite = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
  this.staleInterval =
      getStaleIntervalFromConf(conf, heartbeatExpireInterval);
  this.ratioUseStaleDataNodesForWrite = conf.getFloat(
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
  // The write ratio must lie in (0, 1]; fail fast on misconfiguration.
  Preconditions.checkArgument((ratioUseStaleDataNodesForWrite > 0 &&
          ratioUseStaleDataNodesForWrite <= 1.0f),
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
          " = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
          "It should be a positive non-zero float value, not greater than 1.0f.");
  
  this.storageIdMap = new StorageIdMap();
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:62,代码来源:DatanodeManager.java

示例9: initialize

import org.apache.hadoop.net.ScriptBasedMapping; //导入依赖的package包/类
/**
 * Initialize FSNamesystem: loads the FSImage, sets up safe mode and pending
 * replication tracking, starts the heartbeat/lease/replication/decommission
 * monitor daemons, configures the DNS-to-switch (rack) mapping, and
 * registers metrics and MBeans.
 *
 * @param nn   owning namenode (supplies the RPC address)
 * @param conf configuration for all of the above
 * @throws IOException if the image/edits load or host-file read fails
 */
private void initialize(NameNode nn, Configuration conf) throws IOException {
  this.systemStart = now();
  setConfigurationParameters(conf);
  dtSecretManager = createDelegationTokenSecretManager(conf);

  this.nameNodeAddress = nn.getNameNodeAddress();
  this.registerMBean(conf); // register the MBean for the FSNamesystemStatus
  this.dir = new FSDirectory(this, conf);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  this.dir.loadFSImage(getNamespaceDirs(conf),
                       getNamespaceEditsDirs(conf), startOpt);
  long timeTakenToLoadFSImage = now() - systemStart;
  LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
  NameNode.getNameNodeMetrics().setFsImageLoadTime(timeTakenToLoadFSImage);
  this.safeMode = new SafeModeInfo(conf);
  setBlockTotal();
  // Timeout is configured in seconds; -1 (no key) disables via negative ms.
  pendingReplications = new PendingReplicationBlocks(
                          conf.getInt("dfs.replication.pending.timeout.sec", 
                                      -1) * 1000L);
  if (isAccessTokenEnabled) {
    accessTokenHandler = new BlockTokenSecretManager(true,
        accessKeyUpdateInterval, accessTokenLifetime);
  }
  // Periodic monitors, each wrapped in a Daemon thread and started below.
  this.hbthread = new Daemon(new HeartbeatMonitor());
  this.lmthread = new Daemon(leaseManager.new Monitor());
  this.replmon = new ReplicationMonitor();
  this.replthread = new Daemon(replmon);
  hbthread.start();
  lmthread.start();
  replthread.start();

  this.hostsReader = new HostsFileReader(conf.get("dfs.hosts",""),
                                         conf.get("dfs.hosts.exclude",""));
  this.dnthread = new Daemon(new DecommissionManager(this).new Monitor(
      conf.getInt("dfs.namenode.decommission.interval", 30),
      conf.getInt("dfs.namenode.decommission.nodes.per.interval", 5)));
  dnthread.start();

  // Instantiate the configured rack mapping (script-based by default).
  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
          DNSToSwitchMapping.class), conf);
  
  /* If the dns to switch mapping supports caching, resolve network 
   * locations of those hosts in the include list, 
   * and store the mapping in the cache; so future calls to resolve
   * will be fast.
   */
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
  }
  
  InetSocketAddress socAddr = NameNode.getAddress(conf);
  this.nameNodeHostName = socAddr.getHostName();
  
  registerWith(DefaultMetricsSystem.INSTANCE);
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:60,代码来源:FSNamesystem.java

示例10: initialize

import org.apache.hadoop.net.ScriptBasedMapping; //导入依赖的package包/类
/**
 * Initialize FSNamesystem: loads the FSImage, sets up safe mode and pending
 * replication tracking, starts the heartbeat/lease/replication/decommission
 * monitor daemons, and configures the DNS-to-switch (rack) mapping.
 *
 * @param nn   owning namenode (supplies the RPC address)
 * @param conf configuration for all of the above
 * @throws IOException if the image/edits load or host-file read fails
 */
private void initialize(NameNode nn, Configuration conf) throws IOException {
  this.systemStart = now();
  setConfigurationParameters(conf);

  this.nameNodeAddress = nn.getNameNodeAddress();
  this.registerMBean(conf); // register the MBean for the FSNamesystemStatus
  this.dir = new FSDirectory(this, conf);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  this.dir.loadFSImage(getNamespaceDirs(conf),
                       getNamespaceEditsDirs(conf), startOpt);
  long timeTakenToLoadFSImage = now() - systemStart;
  LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
  NameNode.getNameNodeMetrics().fsImageLoadTime.set(
                            (int) timeTakenToLoadFSImage);
  this.safeMode = new SafeModeInfo(conf);
  setBlockTotal();
  // Timeout is configured in seconds; -1 (no key) disables via negative ms.
  pendingReplications = new PendingReplicationBlocks(
                          conf.getInt("dfs.replication.pending.timeout.sec", 
                                      -1) * 1000L);
  // Periodic monitors, each wrapped in a Daemon thread and started below.
  this.hbthread = new Daemon(new HeartbeatMonitor());
  this.lmthread = new Daemon(leaseManager.new Monitor());
  this.replthread = new Daemon(new ReplicationMonitor());
  hbthread.start();
  lmthread.start();
  replthread.start();

  this.hostsReader = new HostsFileReader(conf.get("dfs.hosts",""),
                                         conf.get("dfs.hosts.exclude",""));
  this.dnthread = new Daemon(new DecommissionManager(this).new Monitor(
      conf.getInt("dfs.namenode.decommission.interval", 30),
      conf.getInt("dfs.namenode.decommission.nodes.per.interval", 5)));
  dnthread.start();

  // Instantiate the configured rack mapping (script-based by default).
  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
          DNSToSwitchMapping.class), conf);
  
  /* If the dns to switch mapping supports caching, resolve network 
   * locations of those hosts in the include list, 
   * and store the mapping in the cache; so future calls to resolve
   * will be fast.
   */
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
  }
}
 
开发者ID:thisisvoa,项目名称:hadoop-0.20,代码行数:50,代码来源:FSNamesystem.java


注:本文中的org.apache.hadoop.net.ScriptBasedMapping类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。