

Java SecurityUtil.buildTokenService Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.security.SecurityUtil.buildTokenService. If you are wondering what SecurityUtil.buildTokenService does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.security.SecurityUtil.


The following 15 code examples of SecurityUtil.buildTokenService are sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
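
Before the full examples, here is a minimal standalone sketch of the method itself (the NameNode host and port below are hypothetical placeholders): buildTokenService turns an RPC endpoint into the Text service name under which a delegation token is stored and later looked up. Depending on the hadoop.security.token.service.use_ip setting, the returned service is typically rendered as ip:port or host:port.

import java.net.InetSocketAddress;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;

public class BuildTokenServiceSketch {
  public static void main(String[] args) {
    // Hypothetical NameNode RPC endpoint; substitute a real host:port.
    InetSocketAddress nnAddr =
        new InetSocketAddress("namenode.example.com", 8020);

    // Build the delegation-token service name for this endpoint. The same
    // Text value is later used as the key when adding the token to a
    // Credentials object and when selecting it back out.
    Text service = SecurityUtil.buildTokenService(nnAddr);
    System.out.println("token service = " + service);
  }
}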

Example 1: createProxyWithNuCypherExtClientProtocol

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
/**
 * Creates the namenode proxy with the ClientProtocol. This will handle
 * creation of either HA- or non-HA-enabled proxy objects, depending upon
 * if the provided URI is a configured logical URI.
 *
 * @param conf the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param fallbackToSimpleAuth set to true or false during calls to indicate
 *        if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException if there is an error creating the proxy
 * @see {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
 */
public static ProxyAndInfo<NuCypherExtClientProtocol> createProxyWithNuCypherExtClientProtocol(
    Configuration conf, URI nameNodeUri, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  AbstractNNFailoverProxyProvider<NuCypherExtClientProtocol> failoverProxyProvider =
      createFailoverProxyProvider(conf, nameNodeUri, NuCypherExtClientProtocol.class,
          true, fallbackToSimpleAuth);

  if (failoverProxyProvider == null) {
    InetSocketAddress nnAddr = NuCypherExtUtilClient.getNNAddress(nameNodeUri);
    Text dtService = SecurityUtil.buildTokenService(nnAddr);
    NuCypherExtClientProtocol proxy = createNonHAProxyWithNuCypherExtClientProtocol(nnAddr, conf,
        UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
    return new ProxyAndInfo<>(proxy, dtService, nnAddr);
  } else {
    return createHAProxy(conf, nameNodeUri, NuCypherExtClientProtocol.class,
        failoverProxyProvider);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 35, Source: NuCypherExtNameNodeProxiesClient.java

Example 2: createHAProxy

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
/**
 * Creates an explicitly HA-enabled proxy object.
 *
 * @param conf the configuration object
 * @param nameNodeUri the URI pointing either to a specific NameNode or to a
 *        logical nameservice.
 * @param xface the IPC interface which should be created
 * @param failoverProxyProvider Failover proxy provider
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createHAProxy(
    Configuration conf, URI nameNodeUri, Class<T> xface,
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider) {
  Preconditions.checkNotNull(failoverProxyProvider);
  // HA case
  DfsClientConf config = new DfsClientConf(conf);
  T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
      RetryPolicies.failoverOnNetworkException(
          RetryPolicies.TRY_ONCE_THEN_FAIL, config.getMaxFailoverAttempts(),
          config.getMaxRetryAttempts(), config.getFailoverSleepBaseMillis(),
          config.getFailoverSleepMaxMillis()));

  Text dtService;
  if (failoverProxyProvider.useLogicalURI()) {
    dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
        HdfsConstants.HDFS_URI_SCHEME);
  } else {
    dtService = SecurityUtil.buildTokenService(
        NuCypherExtUtilClient.getNNAddress(nameNodeUri));
  }
  return new ProxyAndInfo<>(proxy, dtService,
      NuCypherExtUtilClient.getNNAddressCheckLogical(conf, nameNodeUri));
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 36, Source: NuCypherExtNameNodeProxiesClient.java

Example 3: getTokenService

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
@Unstable
public static Text getTokenService(Configuration conf, String address,
    String defaultAddr, int defaultPort) {
  if (HAUtil.isHAEnabled(conf)) {
    // Build a list of service addresses to form the service name
    ArrayList<String> services = new ArrayList<String>();
    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    for (String rmId : HAUtil.getRMHAIds(conf)) {
      // Set RM_ID to get the corresponding RM_ADDRESS
      yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
      services.add(SecurityUtil.buildTokenService(
          yarnConf.getSocketAddr(address, defaultAddr, defaultPort))
          .toString());
    }
    return new Text(Joiner.on(',').join(services));
  }

  // Non-HA case - no need to set RM_ID
  return SecurityUtil.buildTokenService(conf.getSocketAddr(address,
    defaultAddr, defaultPort));
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: ClientRMProxy.java

Example 4: addHistoryToken

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
  /* check if we have a hsproxy, if not, no need */
  MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
  if (UserGroupInformation.isSecurityEnabled() && (hsProxy != null)) {
    /*
     * note that get delegation token was called. Again this is hack for oozie
     * to make sure we add history server delegation tokens to the credentials
     */
    RMDelegationTokenSelector tokenSelector = new RMDelegationTokenSelector();
    Text service = resMgrDelegate.getRMDelegationTokenService();
    if (tokenSelector.selectToken(service, ts.getAllTokens()) != null) {
      Text hsService = SecurityUtil.buildTokenService(hsProxy
          .getConnectAddress());
      if (ts.getToken(hsService) == null) {
        ts.addToken(hsService, getDelegationTokenFromHS(hsProxy));
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: YARNRunner.java

Example 5: initialize

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
@Override
public void initialize(final URI name, final Configuration conf)
throws IOException {
  super.initialize(name, conf);
  setConf(conf);
  this.connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);
  this.ugi = UserGroupInformation.getCurrentUser();
  this.nnUri = getNamenodeUri(name);
  this.tokenServiceName = SecurityUtil.buildTokenService(nnUri);

  try {
    this.hftpURI = new URI(name.getScheme(), name.getAuthority(),
                           null, null, null);
  } catch (URISyntaxException e) {
    throw new IllegalArgumentException(e);
  }

  initTokenAspect();
  if (UserGroupInformation.isSecurityEnabled()) {
    tokenAspect.initDelegationToken(ugi);
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: HftpFileSystem.java

Example 6: selectToken

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
/**
 * Select the delegation token for hdfs.  The port will be rewritten to
 * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port. 
 * This method should only be called by non-hdfs filesystems that do not
 * use the rpc port to acquire tokens.  Ex. webhdfs, hftp 
 * @param nnUri of the remote namenode
 * @param tokens as a collection
 * @param conf hadoop configuration
 * @return Token
 */
public Token<DelegationTokenIdentifier> selectToken(
    final URI nnUri, Collection<Token<?>> tokens,
    final Configuration conf) {
  // this guesses the remote cluster's rpc service port.
  // the current token design assumes it's the same as the local cluster's
  // rpc port unless a config key is set.  there should be a way to automatic
  // and correctly determine the value
  Text serviceName = SecurityUtil.buildTokenService(nnUri);
  final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);
  
  int nnRpcPort = NameNode.DEFAULT_PORT;
  if (nnServiceName != null) {
    nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort(); 
  }
  // use original hostname from the uri to avoid unintentional host resolving
  serviceName = SecurityUtil.buildTokenService(
  		NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));
  
  return selectToken(serviceName, tokens);
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: DelegationTokenSelector.java

Example 7: getDelegationTokenService

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
private Text getDelegationTokenService() throws IOException {
  URL url = new URL(kmsUrl);
  InetSocketAddress addr = new InetSocketAddress(url.getHost(),
      url.getPort());
  Text dtService = SecurityUtil.buildTokenService(addr);
  return dtService;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 8, Source: KMSClientProvider.java

Example 8: createProxy

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
/**
 * Creates the namenode proxy with the passed protocol. This will handle
 * creation of either HA- or non-HA-enabled proxy objects, depending upon
 * if the provided URI is a configured logical URI.
 *
 * @param conf the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param fallbackToSimpleAuth set to true or false during calls to indicate if
 *   a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException if there is an error creating the proxy
 **/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
    URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(conf, nameNodeUri, xface, true,
        fallbackToSimpleAuth);

  if (failoverProxyProvider == null) {
    // Non-HA case
    return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
        UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
  } else {
    // HA case
    Conf config = new Conf(conf);
    T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
        RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
            config.maxRetryAttempts, config.failoverSleepBaseMillis,
            config.failoverSleepMaxMillis));

    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  }
}
 
Developer: naver, Project: hadoop, Lines: 50, Source: NameNodeProxies.java

Example 9: createNonHAProxy

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
/**
 * Creates an explicitly non-HA-enabled proxy object. Most of the time you
 * don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
 *
 * @param conf the configuration object
 * @param nnAddr address of the remote NN to connect to
 * @param xface the IPC interface which should be created
 * @param ugi the user who is making the calls on the proxy object
 * @param withRetries certain interfaces have a non-standard retry policy
 * @param fallbackToSimpleAuth - set to true or false during this method to
 *   indicate if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createNonHAProxy(
    Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
    UserGroupInformation ugi, boolean withRetries,
    AtomicBoolean fallbackToSimpleAuth) throws IOException {
  Text dtService = SecurityUtil.buildTokenService(nnAddr);

  T proxy;
  if (xface == ClientProtocol.class) {
    proxy = (T) createNNProxyWithClientProtocol(nnAddr, conf, ugi,
        withRetries, fallbackToSimpleAuth);
  } else if (xface == JournalProtocol.class) {
    proxy = (T) createNNProxyWithJournalProtocol(nnAddr, conf, ugi);
  } else if (xface == NamenodeProtocol.class) {
    proxy = (T) createNNProxyWithNamenodeProtocol(nnAddr, conf, ugi,
        withRetries);
  } else if (xface == GetUserMappingsProtocol.class) {
    proxy = (T) createNNProxyWithGetUserMappingsProtocol(nnAddr, conf, ugi);
  } else if (xface == RefreshUserMappingsProtocol.class) {
    proxy = (T) createNNProxyWithRefreshUserMappingsProtocol(nnAddr, conf, ugi);
  } else if (xface == RefreshAuthorizationPolicyProtocol.class) {
    proxy = (T) createNNProxyWithRefreshAuthorizationPolicyProtocol(nnAddr,
        conf, ugi);
  } else if (xface == RefreshCallQueueProtocol.class) {
    proxy = (T) createNNProxyWithRefreshCallQueueProtocol(nnAddr, conf, ugi);
  } else {
    String message = "Unsupported protocol found when creating the proxy " +
        "connection to NameNode: " +
        ((xface != null) ? xface.getClass().getName() : "null");
    LOG.error(message);
    throw new IllegalStateException(message);
  }

  return new ProxyAndInfo<T>(proxy, dtService, nnAddr);
}
 
Developer: naver, Project: hadoop, Lines: 51, Source: NameNodeProxies.java

Example 10: initialize

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  super.initialize(name, conf);
  setConf(conf);
  this.uri = URI.create(name.getScheme() + "://" + name.getAuthority());
  tokenAspect = new TokenAspect<DummyFs>(this,
      SecurityUtil.buildTokenService(uri), TOKEN_KIND);
  if (emulateSecurityEnabled || UserGroupInformation.isSecurityEnabled()) {
    tokenAspect.initDelegationToken(ugi);
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: TestTokenAspect.java

Example 11: openConnection

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
/**
 * Returns an authenticated {@link HttpURLConnection}. If the Delegation
 * Token is present, it will be used taking precedence over the configured
 * <code>Authenticator</code>. If the <code>doAs</code> parameter is not NULL,
 * the request will be done on behalf of the specified <code>doAs</code> user.
 *
 * @param url the URL to connect to. Only HTTP/S URLs are supported.
 * @param token the authentication token being used for the user.
 * @param doAs user to do the request on behalf of, if NULL the request is
 * as self.
 * @return an authenticated {@link HttpURLConnection}.
 * @throws IOException if an IO error occurred.
 * @throws AuthenticationException if an authentication exception occurred.
 */
@SuppressWarnings("unchecked")
public HttpURLConnection openConnection(URL url, Token token, String doAs)
    throws IOException, AuthenticationException {
  Preconditions.checkNotNull(url, "url");
  Preconditions.checkNotNull(token, "token");
  Map<String, String> extraParams = new HashMap<String, String>();
  org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dToken
      = null;
  // if we have valid auth token, it takes precedence over a delegation token
  // and we don't even look for one.
  if (!token.isSet()) {
    // delegation token
    Credentials creds = UserGroupInformation.getCurrentUser().
        getCredentials();
    if (!creds.getAllTokens().isEmpty()) {
      InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
          url.getPort());
      Text service = SecurityUtil.buildTokenService(serviceAddr);
      dToken = creds.getToken(service);
      if (dToken != null) {
        if (useQueryStringForDelegationToken()) {
          // delegation token will go in the query string, injecting it
          extraParams.put(
              KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
              dToken.encodeToUrlString());
        } else {
          // delegation token will go as request header, setting it in the
          // auth-token to ensure no authentication handshake is triggered
          // (if we have a delegation token, we are authenticated)
          // the delegation token header is injected in the connection request
          // at the end of this method.
          token.delegationToken = (org.apache.hadoop.security.token.Token
              <AbstractDelegationTokenIdentifier>) dToken;
        }
      }
    }
  }

  // proxyuser
  if (doAs != null) {
    extraParams.put(DO_AS, URLEncoder.encode(doAs, "UTF-8"));
  }

  url = augmentURL(url, extraParams);
  HttpURLConnection conn = super.openConnection(url, token);
  if (!token.isSet() && !useQueryStringForDelegationToken() && dToken != null) {
    // injecting the delegation token header in the connection request
    conn.setRequestProperty(
        DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
        dToken.encodeToUrlString());
  }
  return conn;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 68, Source: DelegationTokenAuthenticatedURL.java

Example 12: createProxyWithLossyRetryHandler

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
/**
 * Generate a dummy namenode proxy instance that utilizes our hacked
 * {@link LossyRetryInvocationHandler}. Proxy instance generated using this
 * method will proactively drop RPC responses. Currently this method only
 * supports HA setup. null will be returned if the given configuration is not
 * for HA.
 *
 * @param config the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param numResponseToDrop The number of responses to drop for each RPC call
 * @param fallbackToSimpleAuth set to true or false during calls to indicate
 *        if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to. Will return null if the
 *         given configuration does not support HA.
 * @throws IOException if there is an error creating the proxy
 */
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
    Configuration config, URI nameNodeUri, Class<T> xface,
    int numResponseToDrop, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  Preconditions.checkArgument(numResponseToDrop > 0);
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(config, nameNodeUri, xface, true,
          fallbackToSimpleAuth);

  if (failoverProxyProvider != null) { // HA case
    int delay = config.getInt(
        HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
        HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
    int maxCap = config.getInt(
        HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
        HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
    int maxFailoverAttempts = config.getInt(
        HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = config.getInt(
        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
    InvocationHandler dummyHandler = new LossyRetryInvocationHandler<>(
            numResponseToDrop, failoverProxyProvider,
            RetryPolicies.failoverOnNetworkException(
                RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
                Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
                maxCap));

    @SuppressWarnings("unchecked")
    T proxy = (T) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class[]{xface}, dummyHandler);
    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NuCypherExtUtilClient.getNNAddress(nameNodeUri));
    }
    return new ProxyAndInfo<>(proxy, dtService,
        NuCypherExtUtilClient.getNNAddress(nameNodeUri));
  } else {
    LOG.warn("Currently creating proxy using " +
        "LossyRetryInvocationHandler requires NN HA setup");
    return null;
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 70, Source: NuCypherExtNameNodeProxiesClient.java

Example 13: buildTimelineTokenService

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
public static Text buildTimelineTokenService(Configuration conf) {
  InetSocketAddress timelineServiceAddr =
      getTimelineTokenServiceAddress(conf);
  return SecurityUtil.buildTokenService(timelineServiceAddr);
}
 
Developer: naver, Project: hadoop, Lines: 6, Source: TimelineUtils.java

Example 14: createProxyWithLossyRetryHandler

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
/**
 * Generate a dummy namenode proxy instance that utilizes our hacked
 * {@link LossyRetryInvocationHandler}. Proxy instance generated using this
 * method will proactively drop RPC responses. Currently this method only
 * supports HA setup. null will be returned if the given configuration is not
 * for HA.
 * 
 * @param config the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param numResponseToDrop The number of responses to drop for each RPC call
 * @param fallbackToSimpleAuth set to true or false during calls to indicate if
 *   a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to. Will return null if the
 *         given configuration does not support HA.
 * @throws IOException if there is an error creating the proxy
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
    Configuration config, URI nameNodeUri, Class<T> xface,
    int numResponseToDrop, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  Preconditions.checkArgument(numResponseToDrop > 0);
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(config, nameNodeUri, xface, true,
        fallbackToSimpleAuth);

  if (failoverProxyProvider != null) { // HA case
    int delay = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int maxCap = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
    int maxFailoverAttempts = config.getInt(
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = config.getInt(
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
            numResponseToDrop, failoverProxyProvider,
            RetryPolicies.failoverOnNetworkException(
                RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts, 
                Math.max(numResponseToDrop + 1, maxRetryAttempts), delay, 
                maxCap));
    
    T proxy = (T) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class[] { xface }, dummyHandler);
    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  } else {
    LOG.warn("Currently creating proxy using " +
    		"LossyRetryInvocationHandler requires NN HA setup");
    return null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 70, Source: NameNodeProxies.java

Example 15: initialize

import org.apache.hadoop.security.SecurityUtil; // import the package/class that the method depends on
@Override
public synchronized void initialize(URI uri, Configuration conf
    ) throws IOException {
  super.initialize(uri, conf);
  setConf(conf);
  /** set user pattern based on configuration file */
  UserParam.setUserPattern(conf.get(
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

  connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);

  ugi = UserGroupInformation.getCurrentUser();
  this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
  this.nnAddrs = resolveNNAddr();

  boolean isHA = HAUtil.isClientFailoverConfigured(conf, this.uri);
  boolean isLogicalUri = isHA && HAUtil.isLogicalUri(conf, this.uri);
  // In non-HA or non-logical URI case, the code needs to call
  // getCanonicalUri() in order to handle the case where no port is
  // specified in the URI
  this.tokenServiceName = isLogicalUri ?
      HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
      : SecurityUtil.buildTokenService(getCanonicalUri());

  if (!isHA) {
    this.retryPolicy =
        RetryUtils.getDefaultRetryPolicy(
            conf,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
            SafeModeException.class);
  } else {

    int maxFailoverAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    int failoverSleepBaseMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int failoverSleepMaxMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);

    this.retryPolicy = RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
            failoverSleepMaxMillis);
  }

  this.workingDir = getHomeDirectory();
  this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
  this.disallowFallbackToInsecureCluster = !conf.getBoolean(
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
  this.delegationToken = null;
}
 
Developer: naver, Project: hadoop, Lines: 64, Source: WebHdfsFileSystem.java


Note: The org.apache.hadoop.security.SecurityUtil.buildTokenService examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.