

Java FailoverProxyProvider Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.retry.FailoverProxyProvider. If you are wondering what FailoverProxyProvider does, how to use it, or where to find working examples, the curated class examples below should help.


The FailoverProxyProvider class belongs to the org.apache.hadoop.io.retry package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
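
Before the examples, it helps to see the basic usage pattern: a FailoverProxyProvider supplies the proxy instances that Hadoop's RetryProxy switches between when a failover retry policy fires. The sketch below is a minimal, hypothetical illustration (the DummyProtocol interface and the underlyingProxy argument are placeholders, not taken from any example on this page); it wraps a single proxy with DefaultFailoverProxyProvider and a failover-on-network-exception retry policy, the same pattern used in Example 9 below.

import java.io.IOException;
import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryProxy;

// Hypothetical RPC interface used only for this sketch.
interface DummyProtocol {
  void dummyRun() throws IOException;
}

class FailoverProxyProviderSketch {
  static DummyProtocol wrapWithRetry(DummyProtocol underlyingProxy) {
    // The provider hands out the current proxy and is asked to fail over on error.
    FailoverProxyProvider<DummyProtocol> provider =
        new DefaultFailoverProxyProvider<DummyProtocol>(
            DummyProtocol.class, underlyingProxy);
    // Retry across failovers on network exceptions: up to 3 failovers,
    // 100 ms base sleep, 1000 ms max sleep (illustrative values only).
    return (DummyProtocol) RetryProxy.create(DummyProtocol.class, provider,
        RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, 3, 100, 1000));
  }
}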

Example 1: getFailoverProxyProviderClass

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
/** Gets the configured Failover proxy provider's class */
@VisibleForTesting
public static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
    Configuration conf, URI nameNodeUri) throws IOException {
  if (nameNodeUri == null) {
    return null;
  }
  String host = nameNodeUri.getHost();

  String configKey = DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "."
      + host;
  try {
    @SuppressWarnings("unchecked")
    Class<FailoverProxyProvider<T>> ret = (Class<FailoverProxyProvider<T>>) conf
        .getClass(configKey, null, FailoverProxyProvider.class);
    return ret;
  } catch (RuntimeException e) {
    if (e.getCause() instanceof ClassNotFoundException) {
      throw new IOException("Could not load failover proxy provider class "
          + conf.get(configKey) + " which is configured for authority "
          + nameNodeUri, e);
    } else {
      throw e;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: NameNodeProxies.java
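
Example 1 resolves the provider class from the configuration key dfs.client.failover.proxy.provider.<host>, where <host> is the host part of the NameNode URI. As a hedged illustration, a client targeting a hypothetical logical nameservice "mycluster" would typically set that key roughly as follows; ConfiguredFailoverProxyProvider is the stock HDFS HA provider, and the nameservice name is an assumption made only for this sketch.

import org.apache.hadoop.conf.Configuration;

Configuration conf = new Configuration();
// The key suffix "mycluster" must match the host of the URI hdfs://mycluster,
// which is exactly what getFailoverProxyProviderClass() looks up above.
conf.set("dfs.client.failover.proxy.provider.mycluster",
    "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");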

Example 2: genClientWithDummyHandler

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestRetryCacheWithHA.java

Example 3: getFailoverProxyProviderClass

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
/** Gets the configured Failover proxy provider's class */
@VisibleForTesting
public static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
    Configuration conf, URI nameNodeUri) throws IOException {
  if (nameNodeUri == null) {
    return null;
  }
  String host = nameNodeUri.getHost();
  String configKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX
      + "." + host;
  try {
    @SuppressWarnings("unchecked")
    Class<FailoverProxyProvider<T>> ret = (Class<FailoverProxyProvider<T>>)
        conf.getClass(configKey, null, FailoverProxyProvider.class);
    return ret;
  } catch (RuntimeException e) {
    if (e.getCause() instanceof ClassNotFoundException) {
      throw new IOException("Could not load failover proxy provider class "
          + conf.get(configKey) + " which is configured for authority "
          + nameNodeUri, e);
    } else {
      throw e;
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: NameNodeProxiesClient.java

Example 4: genClientWithDummyHandler

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxiesClient.createFailoverProxyProvider(conf,
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT,
          HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 19, Source: TestRetryCacheWithHA.java

Example 5: createFailoverProxyProvider

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
@SuppressWarnings("unchecked")
private static <T> FailoverProxyProvider<T> createFailoverProxyProvider(
    Configuration conf, Class<FailoverProxyProvider<T>> failoverProxyProviderClass,
    Class<T> xface, String jtAddress) throws IOException {
  Preconditions.checkArgument(
      xface.isAssignableFrom(JTProtocols.class),
      "Interface %s is not a JobTracker protocol", xface);
  try {
    Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
        .getConstructor(Configuration.class, String.class, Class.class);
    FailoverProxyProvider<?> provider = ctor.newInstance(conf, jtAddress,
        xface);
    return (FailoverProxyProvider<T>) provider;
  } catch (Exception e) {
    String message = "Couldn't create proxy provider " + failoverProxyProviderClass;
    if (LOG.isDebugEnabled()) {
      LOG.debug(message, e);
    }
    if (e.getCause() instanceof IOException) {
      throw (IOException) e.getCause();
    } else {
      throw new IOException(message, e);
    }
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 26, Source: JobTrackerProxies.java

Example 6: createProxy

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
/**
 * Creates the namenode proxy with the passed protocol. This will handle
 * creation of either HA- or non-HA-enabled proxy objects, depending upon
 * if the provided URI is a configured logical URI.
 * 
 * @param conf the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException if there is an error creating the proxy
 **/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
    URI nameNodeUri, Class<T> xface) throws IOException {
  Class<FailoverProxyProvider<T>> failoverProxyProviderClass =
      getFailoverProxyProviderClass(conf, nameNodeUri, xface);

  if (failoverProxyProviderClass == null) {
    // Non-HA case
    return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
        UserGroupInformation.getCurrentUser(), true);
  } else {
    // HA case
    FailoverProxyProvider<T> failoverProxyProvider = NameNodeProxies
        .createFailoverProxyProvider(conf, failoverProxyProviderClass, xface,
            nameNodeUri);
    Conf config = new Conf(conf);
    T proxy = (T) RetryProxy.create(xface, failoverProxyProvider, RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            config.maxFailoverAttempts, config.failoverSleepBaseMillis,
            config.failoverSleepMaxMillis));
    
    Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
    return new ProxyAndInfo<T>(proxy, dtService);
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 40, Source: NameNodeProxies.java
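
Example 6's createProxy is the usual entry point for callers. A minimal usage sketch, assuming a hypothetical logical HA URI hdfs://mycluster configured as in the snippet after Example 1 and the ProxyAndInfo#getProxy accessor, might look like this:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

class CreateProxySketch {
  static ClientProtocol getNamenodeProxy(Configuration conf) throws IOException {
    // Logical (HA) URI: createProxy takes the HA path because a failover
    // proxy provider is configured for the host "mycluster".
    URI nnUri = URI.create("hdfs://mycluster");
    ProxyAndInfo<ClientProtocol> proxyAndInfo =
        NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class);
    return proxyAndInfo.getProxy();
  }
}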

Example 7: createFailoverProxyProvider

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
/** Creates the Failover proxy provider instance*/
@VisibleForTesting
public static <T> FailoverProxyProvider<T> createFailoverProxyProvider(
    Configuration conf, Class<FailoverProxyProvider<T>> failoverProxyProviderClass,
    Class<T> xface, URI nameNodeUri) throws IOException {
  Preconditions.checkArgument(
      xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface %s is not a NameNode protocol", xface);
  try {
    Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
        .getConstructor(Configuration.class, URI.class, Class.class);
    FailoverProxyProvider<T> provider = ctor.newInstance(conf, nameNodeUri,
        xface);
    return provider;
  } catch (Exception e) {
    String message = "Couldn't create proxy provider " + failoverProxyProviderClass;
    if (LOG.isDebugEnabled()) {
      LOG.debug(message, e);
    }
    if (e.getCause() instanceof IOException) {
      throw (IOException) e.getCause();
    } else {
      throw new IOException(message, e);
    }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 27, Source: NameNodeProxies.java

Example 8: genClientWithDummyHandler

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  Class<FailoverProxyProvider<ClientProtocol>> failoverProxyProviderClass = 
      NameNodeProxies.getFailoverProxyProviderClass(conf, nnUri, 
          ClientProtocol.class);
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          failoverProxyProviderClass, ClientProtocol.class, nnUri);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 22, Source: TestRetryCacheWithHA.java

Example 9: testNoRetryOnInvalidToken

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
/**
 * Test that there is no retry when invalid token exception is thrown.
 * Verifies the fix for HADOOP-12054
 */
@Test(expected = InvalidToken.class)
public void testNoRetryOnInvalidToken() throws IOException {
  final Client client = new Client(LongWritable.class, conf);
  final TestServer server = new TestServer(1, false);
  TestInvalidTokenHandler handler =
      new TestInvalidTokenHandler(client, server);
  DummyProtocol proxy = (DummyProtocol) Proxy.newProxyInstance(
      DummyProtocol.class.getClassLoader(),
      new Class[] { DummyProtocol.class }, handler);
  FailoverProxyProvider<DummyProtocol> provider =
      new DefaultFailoverProxyProvider<DummyProtocol>(
          DummyProtocol.class, proxy);
  DummyProtocol retryProxy =
      (DummyProtocol) RetryProxy.create(DummyProtocol.class, provider,
      RetryPolicies.failoverOnNetworkException(
          RetryPolicies.TRY_ONCE_THEN_FAIL, 100, 100, 10000, 0));

  try {
    server.start();
    retryProxy.dummyRun();
  } finally {
    // Check if dummyRun called only once
    Assert.assertEquals(handler.invocations, 1);
    Client.setCallIdAndRetryCount(0, 0);
    client.stop();
    server.stop();
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 33, Source: TestIPC.java

Example 10: createProxy

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
    String jtAddress, Class<T> xface) throws IOException {
  Class<FailoverProxyProvider<T>> failoverProxyProviderClass =
      getFailoverProxyProviderClass(conf, jtAddress, xface);

  if (failoverProxyProviderClass == null) {
    // Non-HA case
    return createNonHAProxy(conf, NetUtils.createSocketAddr(jtAddress), xface,
        UserGroupInformation.getCurrentUser(), true);
  } else {
    // HA case
    FailoverProxyProvider<T> failoverProxyProvider = 
        createFailoverProxyProvider(conf, failoverProxyProviderClass, xface,
            jtAddress);
    int maxFailoverAttempts =
      conf.getInt(HAUtil.MR_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
          HAUtil.MR_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    long failoverSleepBaseMillis =
      conf.getInt(HAUtil.MR_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
          HAUtil.MR_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    long failoverSleepMaxMillis =
      conf.getInt(HAUtil.MR_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
          HAUtil.MR_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
    T proxy = (T) RetryProxy.create(xface, failoverProxyProvider, RetryPolicies
        .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
            maxFailoverAttempts, failoverSleepBaseMillis,
            failoverSleepMaxMillis));
    
    Text dtService = HAUtil.buildTokenServiceForLogicalAddress(jtAddress);
    return new ProxyAndInfo<T>(proxy, dtService);
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 34, Source: JobTrackerProxies.java

Example 11: getFailoverProxyProviderClass

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
private static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
    Configuration conf, String jtAddress, Class<T> xface) throws IOException {
  if (jtAddress == null) {
    return null;
  }
  String configKey = DFSUtil.addKeySuffixes(
      HAUtil.MR_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX,
      HAUtil.getLogicalName(jtAddress));
  return (Class<FailoverProxyProvider<T>>)
    conf.getClass(configKey, null, FailoverProxyProvider.class);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 12, Source: JobTrackerProxies.java

Example 12: getFailoverProxyProviderClass

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
/** Gets the configured Failover proxy provider's class */
@VisibleForTesting
public static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
    Configuration conf, URI nameNodeUri, Class<T> xface) throws IOException {
  if (nameNodeUri == null) {
    return null;
  }
  String host = nameNodeUri.getHost();

  String configKey = DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "."
      + host;
  try {
    @SuppressWarnings("unchecked")
    Class<FailoverProxyProvider<T>> ret = (Class<FailoverProxyProvider<T>>) conf
        .getClass(configKey, null, FailoverProxyProvider.class);
    if (ret != null) {
      // If we found a proxy provider, then this URI should be a logical NN.
      // Given that, it shouldn't have a non-default port number.
      int port = nameNodeUri.getPort();
      if (port > 0 && port != NameNode.DEFAULT_PORT) {
        throw new IOException("Port " + port + " specified in URI "
            + nameNodeUri + " but host '" + host
            + "' is a logical (HA) namenode"
            + " and does not use port information.");
      }
    }
    return ret;
  } catch (RuntimeException e) {
    if (e.getCause() instanceof ClassNotFoundException) {
      throw new IOException("Could not load failover proxy provider class "
          + conf.get(configKey) + " which is configured for authority "
          + nameNodeUri, e);
    } else {
      throw e;
    }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 38, Source: NameNodeProxies.java

Example 13: getFailoverProxyProviderClass

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
/**
 * Gets the configured Failover proxy provider's class
 */
private static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
    Configuration conf, URI nameNodeUri, Class<T> xface) throws IOException {
  if (nameNodeUri == null) {
    return null;
  }
  String host = nameNodeUri.getHost();

  String configKey =
      DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + host;
  try {
    @SuppressWarnings("unchecked")
    Class<FailoverProxyProvider<T>> ret =
        (Class<FailoverProxyProvider<T>>) conf
            .getClass(configKey, null, FailoverProxyProvider.class);
    if (ret != null) {
      // If we found a proxy provider, then this URI should be a logical NN.
      // Given that, it shouldn't have a non-default port number.
      int port = nameNodeUri.getPort();
      if (port > 0 && port != NameNode.DEFAULT_PORT) {
        throw new IOException(
            "Port " + port + " specified in URI " + nameNodeUri +
                " but host '" + host + "' is a logical (HA) namenode" +
                " and does not use port information.");
      }
    }
    return ret;
  } catch (RuntimeException e) {
    if (e.getCause() instanceof ClassNotFoundException) {
      throw new IOException("Could not load failover proxy provider class " +
          conf.get(configKey) + " which is configured for authority " +
          nameNodeUri, e);
    } else {
      throw e;
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 40, Source: NameNodeProxies.java

Example 14: createFailoverProxyProvider

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
/**
 * Creates the Failover proxy provider instance
 */
@SuppressWarnings("unchecked")
private static <T> FailoverProxyProvider<T> createFailoverProxyProvider(
    Configuration conf,
    Class<FailoverProxyProvider<T>> failoverProxyProviderClass,
    Class<T> xface, URI nameNodeUri) throws IOException {
  Preconditions.checkArgument(xface.isAssignableFrom(NamenodeProtocols.class),
      "Interface %s is not a NameNode protocol", xface);
  try {
    Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
        .getConstructor(Configuration.class, URI.class, Class.class);
    FailoverProxyProvider<?> provider =
        ctor.newInstance(conf, nameNodeUri, xface);
    return (FailoverProxyProvider<T>) provider;
  } catch (Exception e) {
    String message =
        "Couldn't create proxy provider " + failoverProxyProviderClass;
    if (LOG.isDebugEnabled()) {
      LOG.debug(message, e);
    }
    if (e.getCause() instanceof IOException) {
      throw (IOException) e.getCause();
    } else {
      throw new IOException(message, e);
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 30, Source: NameNodeProxies.java

Example 15: testNoRetryOnInvalidToken

import org.apache.hadoop.io.retry.FailoverProxyProvider; // import the required package/class
/**
 * Test that there is no retry when invalid token exception is thrown.
 * Verifies the fix for HADOOP-12054
 */
@Test(expected = InvalidToken.class)
public void testNoRetryOnInvalidToken() throws IOException {
  final Client client = new Client(LongWritable.class, conf);
  final TestServer server = new TestServer(1, false);
  TestInvalidTokenHandler handler =
      new TestInvalidTokenHandler(client, server);
  DummyProtocol proxy = (DummyProtocol) Proxy.newProxyInstance(
      DummyProtocol.class.getClassLoader(),
      new Class[] { DummyProtocol.class }, handler);
  FailoverProxyProvider<DummyProtocol> provider =
      new DefaultFailoverProxyProvider<DummyProtocol>(
          DummyProtocol.class, proxy);
  DummyProtocol retryProxy =
      (DummyProtocol) RetryProxy.create(DummyProtocol.class, provider,
      RetryPolicies.failoverOnNetworkException(
          RetryPolicies.TRY_ONCE_THEN_FAIL, 100, 100, 10000, 0));

  try {
    server.start();
    retryProxy.dummyRun();
  } finally {
    // Check if dummyRun called only once
    Assert.assertEquals(handler.invocations, 1);
    Client.setCallIdAndRetryCount(0, 0, null);
    client.stop();
    server.stop();
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 33, Source: TestIPC.java


Note: The org.apache.hadoop.io.retry.FailoverProxyProvider class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not republish without permission.