

Java HAUtil.cloneDelegationTokenForLogicalUri Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.HAUtil.cloneDelegationTokenForLogicalUri. If you are wondering what HAUtil.cloneDelegationTokenForLogicalUri does or how to use it, the curated code examples below should help. You can also explore further usage examples of org.apache.hadoop.hdfs.HAUtil, the class this method belongs to.


The following presents 3 code examples of the HAUtil.cloneDelegationTokenForLogicalUri method, sorted by popularity by default. In an HA HDFS deployment, a client may hold a delegation token keyed to the cluster's logical URI; this method clones that token for each physical NameNode RPC address so that the IPC layer can find it.
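Before the full examples, here is a minimal sketch of calling the method directly, assuming the HAUtil signature used in the examples below and an HA nameservice named mycluster (the nameservice and all host names are illustrative):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.Arrays;
import java.util.Collection;

import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.security.UserGroupInformation;

public static void cloneTokenForLogicalUri() throws IOException {
  // A token held for the logical URI hdfs://mycluster is re-keyed for each
  // physical NameNode address. Nameservice and hosts are illustrative.
  URI logicalUri = URI.create("hdfs://mycluster");
  Collection<InetSocketAddress> nnAddrs = Arrays.asList(
      new InetSocketAddress("nn1.example.com", 8020),
      new InetSocketAddress("nn2.example.com", 8020));
  HAUtil.cloneDelegationTokenForLogicalUri(
      UserGroupInformation.getCurrentUser(), logicalUri, nnAddrs);
}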

Example 1: cloneHaNnCredentials

import org.apache.hadoop.hdfs.HAUtil; // import the package/class this method depends on
/**
 * Clones the delegation token for each individual host behind the same logical address.
 *
 * @param config the Hadoop configuration
 * @throws IOException if information about the current user cannot be obtained.
 */
public static void cloneHaNnCredentials(Configuration config) throws IOException {
  String scheme = URI.create(config.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
                                        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT)).getScheme();

  // Loop through all name services. Each name service could have multiple name nodes associated with it.
  for (Map.Entry<String, Map<String, InetSocketAddress>> entry : DFSUtil.getHaNnRpcAddresses(config).entrySet()) {
    String nsId = entry.getKey();
    Map<String, InetSocketAddress> addressesInNN = entry.getValue();
    if (!HAUtil.isHAEnabled(config, nsId) || addressesInNN == null || addressesInNN.isEmpty()) {
      continue;
    }

    // The client may have a delegation token set for the logical
    // URI of the cluster. Clone this token to apply to each of the
    // underlying IPC addresses so that the IPC code can find it.
    URI uri = URI.create(scheme + "://" + nsId);

    LOG.info("Cloning delegation token for uri {}", uri);
    HAUtil.cloneDelegationTokenForLogicalUri(UserGroupInformation.getCurrentUser(), uri, addressesInNN.values());
  }
}
 
Developer: apache, Project: twill, Lines: 28, Source file: YarnUtils.java
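A hypothetical call site for this helper (not shown in the original): invoke it once on the client before the application's containers access HDFS, so the cloned tokens travel with the current user's credentials.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Hypothetical usage sketch; conf is assumed to already carry the HA
// settings (dfs.nameservices, dfs.ha.namenodes.*, NameNode RPC addresses).
public static void prepareCredentials() throws IOException {
  Configuration conf = new YarnConfiguration();
  YarnUtils.cloneHaNnCredentials(conf);
}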

Example 2: ConfiguredFailoverProxyProvider

import org.apache.hadoop.hdfs.HAUtil; // import the package/class this method depends on
public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
    Class<T> xface) {
  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface class %s is not a valid NameNode protocol!", xface);
  this.xface = xface;
  
  this.conf = new Configuration(conf);
  int maxRetries = this.conf.getInt(
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_KEY,
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      maxRetries);
  
  int maxRetriesOnSocketTimeouts = this.conf.getInt(
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      DFSConfigKeys.DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      maxRetriesOnSocketTimeouts);
  
  try {
    ugi = UserGroupInformation.getCurrentUser();
    
    Map<String, Map<String, InetSocketAddress>> map = DFSUtil.getHaNnRpcAddresses(
        conf);
    Map<String, InetSocketAddress> addressesInNN = map.get(uri.getHost());
    
    if (addressesInNN == null || addressesInNN.size() == 0) {
      throw new RuntimeException("Could not find any configured addresses " +
          "for URI " + uri);
    }
    
    Collection<InetSocketAddress> addressesOfNns = addressesInNN.values();
    for (InetSocketAddress address : addressesOfNns) {
      proxies.add(new AddressRpcProxyPair<T>(address));
    }

    // The client may have a delegation token set for the logical
    // URI of the cluster. Clone this token to apply to each of the
    // underlying IPC addresses so that the IPC code can find it.
    HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 48, Source file: ConfiguredFailoverProxyProvider.java
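For context: clients normally do not construct this provider themselves; HDFS instantiates it when a failover proxy provider is configured for a nameservice. A minimal sketch of the client-side settings that lead to this constructor being invoked (the nameservice mycluster and all host names are illustrative):

import org.apache.hadoop.conf.Configuration;

// Illustrative HA client settings; "mycluster" and the hosts are made up.
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://mycluster");
conf.set("dfs.nameservices", "mycluster");
conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn1.example.com:8020");
conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn2.example.com:8020");
// FileSystem.get(conf) will then construct this proxy provider, and the
// constructor above will clone any token held for hdfs://mycluster.
conf.set("dfs.client.failover.proxy.provider.mycluster",
    "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");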

Example 3: ConfiguredFailoverProxyProvider

import org.apache.hadoop.hdfs.HAUtil; // import the package/class this method depends on
@VisibleForTesting
ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
    Class<T> xface, ProxyFactory<T> factory) {

  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface class %s is not a valid NameNode protocol!", xface);
  this.xface = xface;
  
  this.conf = new Configuration(conf);
  int maxRetries = this.conf.getInt(
      HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_KEY,
      HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      maxRetries);
  
  int maxRetriesOnSocketTimeouts = this.conf.getInt(
      HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      HdfsClientConfigKeys.Failover.CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT);
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      maxRetriesOnSocketTimeouts);

  try {
    ugi = UserGroupInformation.getCurrentUser();
    
    Map<String, Map<String, InetSocketAddress>> map = DFSUtil.getHaNnRpcAddresses(
        conf);
    Map<String, InetSocketAddress> addressesInNN = map.get(uri.getHost());
    
    if (addressesInNN == null || addressesInNN.size() == 0) {
      throw new RuntimeException("Could not find any configured addresses " +
          "for URI " + uri);
    }
    
    Collection<InetSocketAddress> addressesOfNns = addressesInNN.values();
    for (InetSocketAddress address : addressesOfNns) {
      proxies.add(new AddressRpcProxyPair<T>(address));
    }

    // The client may have a delegation token set for the logical
    // URI of the cluster. Clone this token to apply to each of the
    // underlying IPC addresses so that the IPC code can find it.
    HAUtil.cloneDelegationTokenForLogicalUri(ugi, uri, addressesOfNns);
    this.factory = factory;
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 52, Source file: ConfiguredFailoverProxyProvider.java
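This variant differs from example 2 only in the trailing @VisibleForTesting constructor parameter: a ProxyFactory<T> is injected and stored after the token cloning succeeds, which lets unit tests substitute a stub factory instead of opening real RPC connections to the NameNodes. The cloneDelegationTokenForLogicalUri call itself is identical.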


Note: The org.apache.hadoop.hdfs.HAUtil.cloneDelegationTokenForLogicalUri examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors; copyright remains with those authors, and distribution and use are subject to each project's license. Do not reproduce without permission.