当前位置: 首页>>代码示例>>Java>>正文


Java RetryUtils类代码示例

本文整理汇总了Java中org.apache.hadoop.io.retry.RetryUtils的典型用法代码示例。如果您正苦于以下问题:Java RetryUtils类的具体用法?Java RetryUtils怎么用?Java RetryUtils使用的例子?那么，这里精选的类代码示例或许可以为您提供帮助。


RetryUtils类属于org.apache.hadoop.io.retry包,在下文中一共展示了RetryUtils类的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: initialize

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
@Override
public synchronized void initialize(URI uri, Configuration conf)
    throws IOException {
  super.initialize(uri, conf);
  setConf(conf);

  ugi = UserGroupInformation.getCurrentUser();

  // Keep only scheme + authority; path, query and fragment are dropped.
  try {
    this.uri = new URI(uri.getScheme(), uri.getAuthority(), null, null, null);
  } catch (URISyntaxException use) {
    throw new IllegalArgumentException(use);
  }

  this.nnAddr =
      NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());

  // Client retry policy driven by the DFS client retry configuration keys;
  // SafeModeException is the remote exception class handed to RetryUtils.
  this.retryPolicy = RetryUtils.getDefaultRetryPolicy(conf,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
      SafeModeException.class);

  this.workingDir = getHomeDirectory();

  if (UserGroupInformation.isSecurityEnabled()) {
    initDelegationToken();
  }
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:27,代码来源:WebHdfsFileSystem.java

示例2: initialize

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
@Override
public synchronized void initialize(URI uri, Configuration conf)
    throws IOException {
  super.initialize(uri, conf);
  setConf(conf);
  ugi = UserGroupInformation.getCurrentUser();

  final String authority = uri.getAuthority();
  try {
    // Normalize to scheme://authority only.
    this.uri = new URI(uri.getScheme(), authority, null, null, null);
  } catch (URISyntaxException ex) {
    throw new IllegalArgumentException(ex);
  }
  this.nnAddr = NetUtils.createSocketAddr(authority, getDefaultPort());

  // Retry policy from the DFS client retry configuration.
  this.retryPolicy = RetryUtils.getDefaultRetryPolicy(
      conf,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
      SafeModeException.class);

  this.workingDir = getHomeDirectory();
  if (UserGroupInformation.isSecurityEnabled()) {
    initDelegationToken();
  }
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:26,代码来源:WebHdfsFileSystem.java

示例3: initialize

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
@Override
public synchronized void initialize(URI uri, Configuration conf)
    throws IOException {
  super.initialize(uri, conf);
  setConf(conf);
  ugi = UserGroupInformation.getCurrentUser();

  try {
    // Only scheme and authority identify this file system instance.
    this.uri = new URI(uri.getScheme(), uri.getAuthority(), null, null, null);
  } catch (URISyntaxException badUri) {
    throw new IllegalArgumentException(badUri);
  }
  this.nnAddr =
      NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());

  // HTTP-client retry policy: note this variant reads the
  // DFS_HTTP_CLIENT_* keys rather than the plain DFS_CLIENT_* ones.
  this.retryPolicy = RetryUtils.getDefaultRetryPolicy(conf,
      DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
      DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
      DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
      DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
      SafeModeException.class);

  this.workingDir = getHomeDirectory();
  if (UserGroupInformation.isSecurityEnabled()) {
    initDelegationToken();
  }
}
 
开发者ID:chendave,项目名称:hadoop-TCP,代码行数:27,代码来源:WebHdfsFileSystem.java

示例4: createRPCProxy

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
private static JobSubmissionProtocol createRPCProxy(InetSocketAddress addr,
    Configuration conf) throws IOException {
  // Raw JobSubmissionProtocol RPC proxy to the JobTracker at addr, running
  // as the current user. The retry policy is a multiple-linear-random
  // policy derived from the MapReduce client retry configuration keys.
  JobSubmissionProtocol proxy = (JobSubmissionProtocol) RPC.getProxy(
      JobSubmissionProtocol.class, JobSubmissionProtocol.versionID, addr,
      UserGroupInformation.getCurrentUser(), conf,
      NetUtils.getSocketFactory(conf, JobSubmissionProtocol.class), 0,
      RetryUtils.getMultipleLinearRandomRetry(conf,
          MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_KEY,
          MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
          MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_KEY,
          MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_DEFAULT));
  return proxy;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:22,代码来源:JobClient.java

示例5: initialize

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
@Override
public synchronized void initialize(URI uri, Configuration conf)
    throws IOException {
  super.initialize(uri, conf);
  setConf(conf);

  // NameNode address from the URI authority, falling back to the default port.
  this.nnAddr =
      NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());

  // Configurable retry policy; SafeModeException is the remote exception
  // class handed to RetryUtils.
  this.retryPolicy = RetryUtils.getDefaultRetryPolicy(conf,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
      SafeModeException.class);

  this.workingDir = getHomeDirectory();

  if (UserGroupInformation.isSecurityEnabled()) {
    initDelegationToken();
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:23,代码来源:WebHdfsFileSystem.java

示例6: createNNProxyWithClientProtocol

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
/**
 * Creates a non-HA {@code ClientProtocol} proxy to the NameNode at
 * {@code address}, optionally wrapped with a retry proxy.
 *
 * @param address NameNode RPC address
 * @param conf configuration supplying the RPC engine and retry settings
 * @param ugi user the RPC calls are made as
 * @param withRetries if true, wrap the translator in a {@code RetryProxy}
 * @param fallbackToSimpleAuth passed through to the RPC layer
 *        (NOTE(review): presumably set when the connection falls back to
 *        simple auth — confirm against RPC.getProtocolProxy)
 * @throws IOException if the proxy cannot be created
 */
private static ClientProtocol createNNProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

  // Default retry policy built from the DFS client retry configuration keys;
  // SafeModeException is the remote exception class given to RetryUtils.
  final RetryPolicy defaultPolicy = 
      RetryUtils.getDefaultRetryPolicy(
          conf, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
          SafeModeException.class);
  
  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  if (withRetries) { // create the proxy with retries

    // No per-method overrides here: every method uses defaultPolicy.
    Map<String, RetryPolicy> methodNameToPolicyMap 
               = new HashMap<String, RetryPolicy>();
  
    ClientProtocol translatorProxy =
      new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(
        ClientProtocol.class,
        new DefaultFailoverProxyProvider<ClientProtocol>(
            ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap,
        defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:40,代码来源:NameNodeProxies.java

示例7: createNonHAProxyWithClientProtocol

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
/**
 * Creates a non-HA {@code ClientProtocol} proxy to the NameNode at
 * {@code address}, optionally wrapped with a retry proxy.
 */
public static ClientProtocol createNonHAProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
      ProtobufRpcEngine.class);

  // Retry policy from the HDFS client retry configuration; the safe-mode
  // exception is passed to RetryUtils by class NAME in this variant.
  final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(conf,
      HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
      HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
      HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
      HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
      SafeModeException.class.getName());

  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  final ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  final ClientProtocol translator =
      new ClientNamenodeProtocolTranslatorPB(proxy);
  if (!withRetries) {
    return translator;
  }

  // Wrap with retries; no per-method policy overrides are installed.
  return (ClientProtocol) RetryProxy.create(
      ClientProtocol.class,
      new DefaultFailoverProxyProvider<>(ClientProtocol.class, translator),
      new HashMap<String, RetryPolicy>(),
      defaultPolicy);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:38,代码来源:NameNodeProxiesClient.java

示例8: createProxy

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
private static JobSubmissionProtocol createProxy(
    JobSubmissionProtocol rpcJobSubmitClient,
    Configuration conf) throws IOException {

  // Default retry policy for all methods, from the MR client config keys.
  final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(conf,
      MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_KEY,
      MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
      MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_KEY,
      MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
      SafeModeException.class);

  // killJob/killTask are never retried — not on any exception, including
  // connection and safe-mode errors — so pin them to TRY_ONCE_THEN_FAIL.
  final Map<String, RetryPolicy> methodPolicies =
      new HashMap<String, RetryPolicy>();
  methodPolicies.put("killJob", RetryPolicies.TRY_ONCE_THEN_FAIL);
  methodPolicies.put("killTask", RetryPolicies.TRY_ONCE_THEN_FAIL);

  return (JobSubmissionProtocol) RetryProxy.create(
      JobSubmissionProtocol.class, rpcJobSubmitClient, defaultPolicy,
      methodPolicies);
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:29,代码来源:JobClient.java

示例9: createRPCNamenode

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
private static ClientProtocol createRPCNamenode(InetSocketAddress nameNodeAddr,
    Configuration conf, UserGroupInformation ugi) throws IOException {
  // Raw ClientProtocol RPC proxy to the NameNode; retry behavior is a
  // multiple-linear-random policy built from the DFS client config keys.
  return (ClientProtocol) RPC.getProxy(
      ClientProtocol.class, ClientProtocol.versionID, nameNodeAddr, ugi, conf,
      NetUtils.getSocketFactory(conf, ClientProtocol.class), 0,
      RetryUtils.getMultipleLinearRandomRetry(conf,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT));
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:15,代码来源:DFSClient.java

示例10: createNamenode

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
private static ClientProtocol createNamenode(ClientProtocol rpcNamenode,
    Configuration conf) throws IOException {
  // Policy applied to every method unless overridden below.
  final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(conf,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
      SafeModeException.class);

  // "create" retries AlreadyBeingCreatedException up to 5 times, sleeping
  // LEASE_SOFTLIMIT_PERIOD ms between attempts.
  final RetryPolicy createPolicy =
      RetryPolicies.retryUpToMaximumCountWithFixedSleep(
          5, LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);

  final Map<Class<? extends Exception>, RetryPolicy> remotePolicies =
      new HashMap<Class<? extends Exception>, RetryPolicy>();
  remotePolicies.put(AlreadyBeingCreatedException.class, createPolicy);

  // Unwrap RemoteException and dispatch on the wrapped exception type.
  final Map<Class<? extends Exception>, RetryPolicy> exceptionPolicies =
      new HashMap<Class<? extends Exception>, RetryPolicy>();
  exceptionPolicies.put(RemoteException.class,
      RetryPolicies.retryByRemoteException(defaultPolicy, remotePolicies));

  final RetryPolicy createMethodPolicy =
      RetryPolicies.retryByException(defaultPolicy, exceptionPolicies);

  final Map<String, RetryPolicy> methodPolicies =
      new HashMap<String, RetryPolicy>();
  methodPolicies.put("create", createMethodPolicy);

  return (ClientProtocol) RetryProxy.create(ClientProtocol.class,
      rpcNamenode, defaultPolicy, methodPolicies);
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:36,代码来源:DFSClient.java

示例11: initialize

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
/**
 * Initializes this WebHDFS file system: resolves the NameNode address(es),
 * selects a retry/failover policy depending on whether HA client failover is
 * configured for the URI, and sets up delegation-token state.
 * NOTE(review): resolveNNAddr() runs after this.uri is assigned — the
 * statement order here matters.
 */
@Override
public synchronized void initialize(URI uri, Configuration conf
    ) throws IOException {
  super.initialize(uri, conf);
  setConf(conf);
  /** set user pattern based on configuration file */
  UserParam.setUserPattern(conf.get(
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

  connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);

  ugi = UserGroupInformation.getCurrentUser();
  // Keep only scheme://authority; path, query and fragment are dropped.
  this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
  this.nnAddrs = resolveNNAddr();

  boolean isHA = HAUtil.isClientFailoverConfigured(conf, this.uri);
  boolean isLogicalUri = isHA && HAUtil.isLogicalUri(conf, this.uri);
  // In non-HA or non-logical URI case, the code needs to call
  // getCanonicalUri() in order to handle the case where no port is
  // specified in the URI
  this.tokenServiceName = isLogicalUri ?
      HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
      : SecurityUtil.buildTokenService(getCanonicalUri());

  if (!isHA) {
    // Non-HA: configurable retry policy from DFS_HTTP_CLIENT_RETRY_POLICY_*.
    this.retryPolicy =
        RetryUtils.getDefaultRetryPolicy(
            conf,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
            SafeModeException.class);
  } else {

    // HA: fail over between NameNodes on network errors, bounded by the
    // configured attempt counts and sleep times below.
    int maxFailoverAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    int failoverSleepBaseMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int failoverSleepMaxMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);

    this.retryPolicy = RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
            failoverSleepMaxMillis);
  }

  this.workingDir = getHomeDirectory();
  // Token refresh is only possible when security is enabled.
  this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
  this.disallowFallbackToInsecureCluster = !conf.getBoolean(
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
  this.delegationToken = null;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:64,代码来源:WebHdfsFileSystem.java

示例12: initialize

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
/**
 * Initializes this WebHDFS file system. Chooses an OAuth2 or default URL
 * connection factory based on configuration, resolves the NameNode
 * address(es), and selects a retry/failover policy depending on whether HA
 * client failover is configured for the URI.
 * NOTE(review): resolveNNAddr() runs after this.uri is assigned — the
 * statement order here matters.
 */
@Override
public synchronized void initialize(URI uri, Configuration conf
) throws IOException {
  super.initialize(uri, conf);
  setConf(conf);
  /** set user pattern based on configuration file */
  UserParam.setUserPattern(conf.get(
      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

  boolean isOAuth = conf.getBoolean(
      HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY,
      HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_DEFAULT);

  if(isOAuth) {
    LOG.debug("Enabling OAuth2 in WebHDFS");
    connectionFactory = URLConnectionFactory
        .newOAuth2URLConnectionFactory(conf);
  } else {
    LOG.debug("Not enabling OAuth2 in WebHDFS");
    connectionFactory = URLConnectionFactory
        .newDefaultURLConnectionFactory(conf);
  }


  ugi = UserGroupInformation.getCurrentUser();
  // Keep only scheme://authority; path, query and fragment are dropped.
  this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
  this.nnAddrs = resolveNNAddr();

  boolean isHA = HAUtilClient.isClientFailoverConfigured(conf, this.uri);
  boolean isLogicalUri = isHA && HAUtilClient.isLogicalUri(conf, this.uri);
  // In non-HA or non-logical URI case, the code needs to call
  // getCanonicalUri() in order to handle the case where no port is
  // specified in the URI
  this.tokenServiceName = isLogicalUri ?
      HAUtilClient.buildTokenServiceForLogicalUri(uri, getScheme())
      : SecurityUtil.buildTokenService(getCanonicalUri());

  if (!isHA) {
    // Non-HA: configurable retry policy; the safe-mode exception is passed
    // by class name via HdfsConstants.
    this.retryPolicy =
        RetryUtils.getDefaultRetryPolicy(
            conf,
            HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_KEY,
            HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT,
            HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY,
            HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT,
            HdfsConstants.SAFEMODE_EXCEPTION_CLASS_NAME);
  } else {

    // HA: fail over between NameNodes on network errors, bounded by the
    // configured attempt counts and sleep times below.
    int maxFailoverAttempts = conf.getInt(
        HdfsClientConfigKeys.HttpClient.FAILOVER_MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.HttpClient.FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = conf.getInt(
        HdfsClientConfigKeys.HttpClient.RETRY_MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.HttpClient.RETRY_MAX_ATTEMPTS_DEFAULT);
    int failoverSleepBaseMillis = conf.getInt(
        HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_BASE_KEY,
        HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int failoverSleepMaxMillis = conf.getInt(
        HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_MAX_KEY,
        HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_MAX_DEFAULT);

    this.retryPolicy = RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
            failoverSleepMaxMillis);
  }

  this.workingDir = makeQualified(new Path(getHomeDirectoryString(ugi)));
  // Token refresh is only possible when security is enabled.
  this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
  this.disallowFallbackToInsecureCluster = !conf.getBoolean(
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
  this.delegationToken = null;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:76,代码来源:WebHdfsFileSystem.java

示例13: createNNProxyWithClientProtocol

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
/**
 * Creates a non-HA {@code ClientProtocol} proxy to the NameNode at
 * {@code address}, optionally wrapped with retries. Unlike the simpler
 * variant, this one installs a special retry policy for "create".
 *
 * @param address NameNode RPC address
 * @param conf configuration supplying the RPC engine and retry settings
 * @param ugi user the RPC calls are made as
 * @param withRetries if true, wrap the translator in a {@code RetryProxy}
 * @param fallbackToSimpleAuth passed through to the RPC layer
 * @throws IOException if the proxy cannot be created
 */
private static ClientProtocol createNNProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

  // Default retry policy built from the DFS client retry configuration keys.
  final RetryPolicy defaultPolicy = 
      RetryUtils.getDefaultRetryPolicy(
          conf, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
          SafeModeException.class);
  
  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  if (withRetries) { // create the proxy with retries

    // "create" retries AlreadyBeingCreatedException up to 5 times, sleeping
    // LEASE_SOFTLIMIT_PERIOD ms between attempts.
    RetryPolicy createPolicy = RetryPolicies
        .retryUpToMaximumCountWithFixedSleep(5,
            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
  
    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap 
               = new HashMap<Class<? extends Exception>, RetryPolicy>();
    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
        createPolicy);

    // Only "create" gets the special policy; everything else uses
    // defaultPolicy.
    RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
        defaultPolicy, remoteExceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap 
               = new HashMap<String, RetryPolicy>();
  
    methodNameToPolicyMap.put("create", methodPolicy);

    ClientProtocol translatorProxy =
      new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(
        ClientProtocol.class,
        new DefaultFailoverProxyProvider<ClientProtocol>(
            ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap,
        defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:53,代码来源:NameNodeProxies.java

示例14: initialize

import org.apache.hadoop.io.retry.RetryUtils; //导入依赖的package包/类
/**
 * Initializes this WebHDFS file system: resolves the WebHDFS NameNode
 * address(es) from the URI, selects a retry/failover policy depending on
 * whether the URI is a logical (HA) one, and initializes the token aspect.
 * NOTE(review): DFSUtil.resolveWebHdfsUri runs after this.uri is assigned —
 * the statement order here matters.
 */
@Override
public synchronized void initialize(URI uri, Configuration conf
    ) throws IOException {
  super.initialize(uri, conf);
  setConf(conf);
  /** set user pattern based on configuration file */
  UserParam.setUserPattern(conf.get(
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

  connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);

  ugi = UserGroupInformation.getCurrentUser();
  // Keep only scheme://authority; path, query and fragment are dropped.
  this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
  this.nnAddrs = DFSUtil.resolveWebHdfsUri(this.uri, conf);

  boolean isHA = HAUtil.isLogicalUri(conf, this.uri);
  // In non-HA case, the code needs to call getCanonicalUri() in order to
  // handle the case where no port is specified in the URI
  this.tokenServiceName = isHA ? HAUtil.buildTokenServiceForLogicalUri(uri)
      : SecurityUtil.buildTokenService(getCanonicalUri());
  initializeTokenAspect();

  if (!isHA) {
    // Non-HA: configurable retry policy from DFS_HTTP_CLIENT_RETRY_POLICY_*.
    this.retryPolicy =
        RetryUtils.getDefaultRetryPolicy(
            conf,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
            SafeModeException.class);
  } else {

    // HA: fail over between NameNodes on network errors, bounded by the
    // configured attempt counts and sleep times below.
    int maxFailoverAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    int failoverSleepBaseMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int failoverSleepMaxMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);

    this.retryPolicy = RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
            failoverSleepMaxMillis);
  }

  this.workingDir = getHomeDirectory();

  if (UserGroupInformation.isSecurityEnabled()) {
    tokenAspect.initDelegationToken(ugi);
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:61,代码来源:WebHdfsFileSystem.java


注:本文中的org.apache.hadoop.io.retry.RetryUtils类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。