当前位置: 首页>>代码示例>>Java>>正文


Java RetryPolicies.failoverOnNetworkException方法代码示例

本文整理汇总了Java中org.apache.hadoop.io.retry.RetryPolicies.failoverOnNetworkException方法的典型用法代码示例。如果您正苦于以下问题:Java RetryPolicies.failoverOnNetworkException方法的具体用法?Java RetryPolicies.failoverOnNetworkException怎么用?Java RetryPolicies.failoverOnNetworkException使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.io.retry.RetryPolicies的用法示例。


在下文中一共展示了RetryPolicies.failoverOnNetworkException方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: genClientWithDummyHandler

import org.apache.hadoop.io.retry.RetryPolicies; //导入方法依赖的package包/类
/**
 * Creates a {@link DFSClient} whose ClientProtocol proxy routes every call
 * through a {@link DummyRetryInvocationHandler}, failing over on network
 * exceptions with an effectively unlimited attempt count.
 *
 * @return a DFSClient backed by the dummy retry invocation handler
 * @throws IOException if the failover proxy provider cannot be created
 */
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider =
      NameNodeProxies.createFailoverProxyProvider(conf,
          nnUri, ClientProtocol.class, true, null);
  // Fail over on network errors with Integer.MAX_VALUE attempts, using the
  // default exponential-backoff sleep bounds between attempts.
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  // Wildcard interface array avoids a raw-type unchecked warning.
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class<?>[] { ClientProtocol.class }, dummyHandler);

  return new DFSClient(null, proxy, conf, null);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:TestRetryCacheWithHA.java

示例2: genClientWithDummyHandler

import org.apache.hadoop.io.retry.RetryPolicies; //导入方法依赖的package包/类
/**
 * Creates a {@link DFSClient} whose ClientProtocol proxy routes every call
 * through a {@link DummyRetryInvocationHandler}, failing over on network
 * exceptions with an effectively unlimited attempt count.
 *
 * @return a DFSClient backed by the dummy retry invocation handler
 * @throws IOException if the failover proxy provider cannot be created
 */
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider =
      NameNodeProxiesClient.createFailoverProxyProvider(conf,
          nnUri, ClientProtocol.class, true, null);
  // Fail over on network errors with Integer.MAX_VALUE attempts, using the
  // client-config default exponential-backoff sleep bounds.
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT,
          HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT));
  // Wildcard interface array avoids a raw-type unchecked warning.
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class<?>[] { ClientProtocol.class }, dummyHandler);

  return new DFSClient(null, proxy, conf, null);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:19,代码来源:TestRetryCacheWithHA.java

示例3: genClientWithDummyHandler

import org.apache.hadoop.io.retry.RetryPolicies; //导入方法依赖的package包/类
/**
 * Creates a {@link DFSClient} whose ClientProtocol proxy routes every call
 * through a {@link DummyRetryInvocationHandler}, failing over on network
 * exceptions with an effectively unlimited attempt count.
 *
 * @return a DFSClient backed by the dummy retry invocation handler
 * @throws IOException if the failover proxy provider cannot be created
 */
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  // Older NameNodeProxies API: resolve the provider class first, then
  // instantiate it for the ClientProtocol interface.
  Class<FailoverProxyProvider<ClientProtocol>> failoverProxyProviderClass =
      NameNodeProxies.getFailoverProxyProviderClass(conf, nnUri,
          ClientProtocol.class);
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider =
      NameNodeProxies.createFailoverProxyProvider(conf,
          failoverProxyProviderClass, ClientProtocol.class, nnUri);
  // Fail over on network errors with Integer.MAX_VALUE attempts, using the
  // default exponential-backoff sleep bounds.
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  // Wildcard interface array avoids a raw-type unchecked warning.
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class<?>[] { ClientProtocol.class }, dummyHandler);

  return new DFSClient(null, proxy, conf, null);
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:22,代码来源:TestRetryCacheWithHA.java

示例4: createProxyWithLossyRetryHandler

import org.apache.hadoop.io.retry.RetryPolicies; //导入方法依赖的package包/类
/**
 * Generate a dummy namenode proxy instance that utilizes our hacked
 * {@link LossyRetryInvocationHandler}. Proxy instance generated using this
 * method will proactively drop RPC responses. Currently this method only
 * supports HA setup. null will be returned if the given configuration is not
 * for HA.
 *
 * @param config the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param numResponseToDrop The number of responses to drop for each RPC call
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to. Will return null if the
 *         given configuration does not support HA.
 * @throws IOException if there is an error creating the proxy
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
    Configuration config, URI nameNodeUri, Class<T> xface,
    int numResponseToDrop) throws IOException {
  Preconditions.checkArgument(numResponseToDrop > 0);
  Class<FailoverProxyProvider<T>> failoverProxyProviderClass =
      getFailoverProxyProviderClass(config, nameNodeUri, xface);
  if (failoverProxyProviderClass != null) { // HA case
    FailoverProxyProvider<T> failoverProxyProvider =
        createFailoverProxyProvider(config, failoverProxyProviderClass,
            xface, nameNodeUri);
    int delay = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int maxCap = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
    int maxFailoverAttempts = config.getInt(
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    // Guarantee at least numResponseToDrop + 1 failover attempts so the
    // proxy can still succeed after all dropped responses.
    InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
            numResponseToDrop, failoverProxyProvider,
            RetryPolicies.failoverOnNetworkException(
                RetryPolicies.TRY_ONCE_THEN_FAIL,
                Math.max(numResponseToDrop + 1, maxFailoverAttempts), delay,
                maxCap));

    // Wildcard interface array avoids a raw-type unchecked warning.
    T proxy = (T) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class<?>[] { xface }, dummyHandler);
    Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
    return new ProxyAndInfo<T>(proxy, dtService);
  } else {
    LOG.warn("Currently creating proxy using " +
        "LossyRetryInvocationHandler requires NN HA setup");
    return null;
  }
}
 
开发者ID:chendave,项目名称:hadoop-TCP,代码行数:57,代码来源:NameNodeProxies.java

示例5: createProxyWithLossyRetryHandler

import org.apache.hadoop.io.retry.RetryPolicies; //导入方法依赖的package包/类
/**
 * Generate a dummy namenode proxy instance that utilizes our hacked
 * {@link LossyRetryInvocationHandler}. Proxy instance generated using this
 * method will proactively drop RPC responses. Currently this method only
 * supports HA setup. null will be returned if the given configuration is not
 * for HA.
 *
 * @param config the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param numResponseToDrop The number of responses to drop for each RPC call
 * @param fallbackToSimpleAuth set to true or false during calls to indicate if
 *   a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to. Will return null if the
 *         given configuration does not support HA.
 * @throws IOException if there is an error creating the proxy
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
    Configuration config, URI nameNodeUri, Class<T> xface,
    int numResponseToDrop, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  Preconditions.checkArgument(numResponseToDrop > 0);
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(config, nameNodeUri, xface, true,
        fallbackToSimpleAuth);

  if (failoverProxyProvider != null) { // HA case
    int delay = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int maxCap = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
    int maxFailoverAttempts = config.getInt(
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = config.getInt(
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    // Guarantee at least numResponseToDrop + 1 retry attempts so the proxy
    // can still succeed after all dropped responses.
    InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
            numResponseToDrop, failoverProxyProvider,
            RetryPolicies.failoverOnNetworkException(
                RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
                Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
                maxCap));

    // Wildcard interface array avoids a raw-type unchecked warning.
    T proxy = (T) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class<?>[] { xface }, dummyHandler);
    Text dtService;
    // Logical (nameservice) URIs get a logical token service name; physical
    // URIs resolve to a concrete NameNode address.
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  } else {
    LOG.warn("Currently creating proxy using " +
        "LossyRetryInvocationHandler requires NN HA setup");
    return null;
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:70,代码来源:NameNodeProxies.java

示例6: initialize

import org.apache.hadoop.io.retry.RetryPolicies; //导入方法依赖的package包/类
/**
 * Initializes this WebHDFS filesystem: sets the user-name pattern, builds
 * the URL connection factory, resolves the NameNode addresses, derives the
 * delegation-token service name, and selects a retry/failover policy based
 * on whether client failover (HA) is configured for the URI.
 */
@Override
public synchronized void initialize(URI uri, Configuration conf
    ) throws IOException {
  super.initialize(uri, conf);
  setConf(conf);
  /** set user pattern based on configuration file */
  UserParam.setUserPattern(conf.get(
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

  connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);

  ugi = UserGroupInformation.getCurrentUser();
  // Normalize to scheme://authority, dropping any path/query/fragment.
  this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
  this.nnAddrs = resolveNNAddr();

  boolean isHA = HAUtil.isClientFailoverConfigured(conf, this.uri);
  boolean isLogicalUri = isHA && HAUtil.isLogicalUri(conf, this.uri);
  // In non-HA or non-logical URI case, the code needs to call
  // getCanonicalUri() in order to handle the case where no port is
  // specified in the URI
  this.tokenServiceName = isLogicalUri ?
      HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
      : SecurityUtil.buildTokenService(getCanonicalUri());

  if (!isHA) {
    // Non-HA: plain retry policy driven by the HTTP client retry settings.
    this.retryPolicy =
        RetryUtils.getDefaultRetryPolicy(
            conf,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
            SafeModeException.class);
  } else {

    // HA: fail over between NameNodes on network exceptions, with
    // configurable attempt limits and sleep bounds.
    int maxFailoverAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    int failoverSleepBaseMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int failoverSleepMaxMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);

    this.retryPolicy = RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
            failoverSleepMaxMillis);
  }

  this.workingDir = getHomeDirectory();
  this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
  this.disallowFallbackToInsecureCluster = !conf.getBoolean(
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
  // Token is fetched lazily; start with none.
  this.delegationToken = null;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:64,代码来源:WebHdfsFileSystem.java

示例7: initialize

import org.apache.hadoop.io.retry.RetryPolicies; //导入方法依赖的package包/类
/**
 * Initializes this WebHDFS filesystem: sets the user-name pattern, builds
 * either an OAuth2 or default URL connection factory, resolves the NameNode
 * addresses, derives the delegation-token service name, and selects a
 * retry/failover policy based on whether client failover (HA) is configured.
 */
@Override
public synchronized void initialize(URI uri, Configuration conf
) throws IOException {
  super.initialize(uri, conf);
  setConf(conf);
  /** set user pattern based on configuration file */
  UserParam.setUserPattern(conf.get(
      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

  // Choose the connection factory based on whether OAuth2 is enabled.
  boolean isOAuth = conf.getBoolean(
      HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY,
      HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_DEFAULT);

  if(isOAuth) {
    LOG.debug("Enabling OAuth2 in WebHDFS");
    connectionFactory = URLConnectionFactory
        .newOAuth2URLConnectionFactory(conf);
  } else {
    LOG.debug("Not enabling OAuth2 in WebHDFS");
    connectionFactory = URLConnectionFactory
        .newDefaultURLConnectionFactory(conf);
  }


  ugi = UserGroupInformation.getCurrentUser();
  // Normalize to scheme://authority, dropping any path/query/fragment.
  this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
  this.nnAddrs = resolveNNAddr();

  boolean isHA = HAUtilClient.isClientFailoverConfigured(conf, this.uri);
  boolean isLogicalUri = isHA && HAUtilClient.isLogicalUri(conf, this.uri);
  // In non-HA or non-logical URI case, the code needs to call
  // getCanonicalUri() in order to handle the case where no port is
  // specified in the URI
  this.tokenServiceName = isLogicalUri ?
      HAUtilClient.buildTokenServiceForLogicalUri(uri, getScheme())
      : SecurityUtil.buildTokenService(getCanonicalUri());

  if (!isHA) {
    // Non-HA: plain retry policy driven by the HTTP client retry settings.
    this.retryPolicy =
        RetryUtils.getDefaultRetryPolicy(
            conf,
            HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_KEY,
            HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT,
            HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY,
            HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT,
            HdfsConstants.SAFEMODE_EXCEPTION_CLASS_NAME);
  } else {

    // HA: fail over between NameNodes on network exceptions, with
    // configurable attempt limits and sleep bounds.
    int maxFailoverAttempts = conf.getInt(
        HdfsClientConfigKeys.HttpClient.FAILOVER_MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.HttpClient.FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = conf.getInt(
        HdfsClientConfigKeys.HttpClient.RETRY_MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.HttpClient.RETRY_MAX_ATTEMPTS_DEFAULT);
    int failoverSleepBaseMillis = conf.getInt(
        HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_BASE_KEY,
        HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int failoverSleepMaxMillis = conf.getInt(
        HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_MAX_KEY,
        HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_MAX_DEFAULT);

    this.retryPolicy = RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
            failoverSleepMaxMillis);
  }

  this.workingDir = makeQualified(new Path(getHomeDirectoryString(ugi)));
  this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
  this.disallowFallbackToInsecureCluster = !conf.getBoolean(
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
      CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
  // Token is fetched lazily; start with none.
  this.delegationToken = null;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:76,代码来源:WebHdfsFileSystem.java

示例8: createProxyWithLossyRetryHandler

import org.apache.hadoop.io.retry.RetryPolicies; //导入方法依赖的package包/类
/**
 * Generate a dummy namenode proxy instance that utilizes our hacked
 * {@link LossyRetryInvocationHandler}. Proxy instance generated using this
 * method will proactively drop RPC responses. Currently this method only
 * supports HA setup. null will be returned if the given configuration is not
 * for HA.
 *
 * @param config the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param numResponseToDrop The number of responses to drop for each RPC call
 * @param fallbackToSimpleAuth set to true or false during calls to indicate
 *        if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to. Will return null if the
 *         given configuration does not support HA.
 * @throws IOException if there is an error creating the proxy
 */
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
    Configuration config, URI nameNodeUri, Class<T> xface,
    int numResponseToDrop, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  Preconditions.checkArgument(numResponseToDrop > 0);
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(config, nameNodeUri, xface, true,
          fallbackToSimpleAuth);

  if (failoverProxyProvider != null) { // HA case
    int delay = config.getInt(
        HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
        HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
    int maxCap = config.getInt(
        HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
        HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
    int maxFailoverAttempts = config.getInt(
        HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = config.getInt(
        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
    // Guarantee at least numResponseToDrop + 1 retry attempts so the proxy
    // can still succeed after all dropped responses.
    InvocationHandler dummyHandler = new LossyRetryInvocationHandler<>(
            numResponseToDrop, failoverProxyProvider,
            RetryPolicies.failoverOnNetworkException(
                RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
                Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
                maxCap));

    // Wildcard interface array avoids a raw-type unchecked warning; the
    // cast to T is still unchecked by nature of Proxy.newProxyInstance.
    @SuppressWarnings("unchecked")
    T proxy = (T) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class<?>[]{xface}, dummyHandler);
    Text dtService;
    // Logical (nameservice) URIs get a logical token service name; physical
    // URIs resolve to a concrete NameNode address.
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          DFSUtilClient.getNNAddress(nameNodeUri));
    }
    return new ProxyAndInfo<>(proxy, dtService,
        DFSUtilClient.getNNAddress(nameNodeUri));
  } else {
    LOG.warn("Currently creating proxy using " +
        "LossyRetryInvocationHandler requires NN HA setup");
    return null;
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:70,代码来源:NameNodeProxiesClient.java

示例9: createProxyWithLossyRetryHandler

import org.apache.hadoop.io.retry.RetryPolicies; //导入方法依赖的package包/类
/**
 * Generate a dummy namenode proxy instance that utilizes our hacked
 * {@link LossyRetryInvocationHandler}. Proxy instance generated using this
 * method will proactively drop RPC responses. Currently this method only
 * supports HA setup. null will be returned if the given configuration is not
 * for HA.
 *
 * @param config the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param numResponseToDrop The number of responses to drop for each RPC call
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to. Will return null if the
 *         given configuration does not support HA.
 * @throws IOException if there is an error creating the proxy
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
    Configuration config, URI nameNodeUri, Class<T> xface,
    int numResponseToDrop) throws IOException {
  Preconditions.checkArgument(numResponseToDrop > 0);
  Class<FailoverProxyProvider<T>> failoverProxyProviderClass =
      getFailoverProxyProviderClass(config, nameNodeUri, xface);
  if (failoverProxyProviderClass != null) { // HA case
    FailoverProxyProvider<T> failoverProxyProvider =
        createFailoverProxyProvider(config, failoverProxyProviderClass,
            xface, nameNodeUri);
    int delay = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int maxCap = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
    int maxFailoverAttempts = config.getInt(
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = config.getInt(
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    // Guarantee at least numResponseToDrop + 1 retry attempts so the proxy
    // can still succeed after all dropped responses.
    InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
            numResponseToDrop, failoverProxyProvider,
            RetryPolicies.failoverOnNetworkException(
                RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
                Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
                maxCap));

    // Wildcard interface array avoids a raw-type unchecked warning.
    T proxy = (T) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class<?>[] { xface }, dummyHandler);
    Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
    return new ProxyAndInfo<T>(proxy, dtService);
  } else {
    LOG.warn("Currently creating proxy using " +
        "LossyRetryInvocationHandler requires NN HA setup");
    return null;
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:60,代码来源:NameNodeProxies.java

示例10: initialize

import org.apache.hadoop.io.retry.RetryPolicies; //导入方法依赖的package包/类
/**
 * Initializes this WebHDFS filesystem: sets the user-name pattern, builds
 * the URL connection factory, resolves the NameNode addresses, derives the
 * delegation-token service name, and selects a retry/failover policy based
 * on whether the URI is a logical (HA) URI.
 */
@Override
public synchronized void initialize(URI uri, Configuration conf
    ) throws IOException {
  super.initialize(uri, conf);
  setConf(conf);
  /** set user pattern based on configuration file */
  UserParam.setUserPattern(conf.get(
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

  connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);

  ugi = UserGroupInformation.getCurrentUser();
  // Normalize to scheme://authority, dropping any path/query/fragment.
  this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
  this.nnAddrs = DFSUtil.resolveWebHdfsUri(this.uri, conf);

  boolean isHA = HAUtil.isLogicalUri(conf, this.uri);
  // In non-HA case, the code needs to call getCanonicalUri() in order to
  // handle the case where no port is specified in the URI
  this.tokenServiceName = isHA ? HAUtil.buildTokenServiceForLogicalUri(uri)
      : SecurityUtil.buildTokenService(getCanonicalUri());
  initializeTokenAspect();

  if (!isHA) {
    // Non-HA: plain retry policy driven by the HTTP client retry settings.
    this.retryPolicy =
        RetryUtils.getDefaultRetryPolicy(
            conf,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
            DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
            SafeModeException.class);
  } else {

    // HA: fail over between NameNodes on network exceptions, with
    // configurable attempt limits and sleep bounds.
    int maxFailoverAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    int failoverSleepBaseMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int failoverSleepMaxMillis = conf.getInt(
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);

    this.retryPolicy = RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
            failoverSleepMaxMillis);
  }

  this.workingDir = getHomeDirectory();

  // Delegation tokens are only initialized when security is on.
  if (UserGroupInformation.isSecurityEnabled()) {
    tokenAspect.initDelegationToken(ugi);
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:61,代码来源:WebHdfsFileSystem.java


注:本文中的org.apache.hadoop.io.retry.RetryPolicies.failoverOnNetworkException方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。