当前位置: 首页>>代码示例>>Java>>正文


Java ConfiguredFailoverProxyProvider类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider的典型用法代码示例。如果您正苦于以下问题:Java ConfiguredFailoverProxyProvider类的具体用法?Java ConfiguredFailoverProxyProvider怎么用?Java ConfiguredFailoverProxyProvider使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


ConfiguredFailoverProxyProvider类属于org.apache.hadoop.hdfs.server.namenode.ha包,在下文中一共展示了ConfiguredFailoverProxyProvider类的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testFailureWithMisconfiguredHaNNs

import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; //导入依赖的package包/类
/**
 * Verifies that a helpful error message is produced when a failover proxy
 * provider is configured for a logical URI but no NameNode addresses have
 * been configured for that URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  final String logicalName = "misconfigured-ha-uri";
  Configuration config = new Configuration();
  // Configure only the proxy provider; deliberately omit the NN addresses.
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      ConfiguredFailoverProxyProvider.class.getName());

  URI haUri = new URI("hdfs://" + logicalName + "/test");
  try {
    FileSystem.get(haUri, config).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    // The stringified exception must name the offending URI.
    String stackText = StringUtils.stringifyException(ioe);
    assertTrue("expected exception did not contain helpful message",
        stackText.contains(
            "Could not find any configured addresses for URI " + haUri));
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:TestDFSClientFailover.java

示例2: initHAConf

import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; //导入依赖的package包/类
/**
 * Populates the given configuration for a two-NameNode HA setup backed by
 * the supplied shared journal URI: loopback RPC endpoints derived from
 * {@code basePort}, the HA nameservice mapping, and the failover proxy
 * provider, with {@code fs.defaultFS} pointing at the logical nameservice.
 */
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());

  // NN2 listens two ports above NN1.
  final String nn1Address = "127.0.0.1:" + basePort;
  final String nn2Address = "127.0.0.1:" + (basePort + 2);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), nn1Address);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), nn2Address);

  // Declare the nameservice and its member NameNodes.
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);

  return conf;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:MiniQJMHACluster.java

示例3: addHAConfiguration

import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; //导入依赖的package包/类
/**
 * Adds a new HA nameservice named {@code logicalName} to {@code conf}:
 * registers it under {@code dfs.nameservices} (appending if others exist),
 * declares the fixed NameNode pair "nn1,nn2", wires up the configured
 * failover proxy provider, and forces replication to 1.
 */
public static void addHAConfiguration(Configuration conf,
    final String logicalName) {
  // Append to any nameservices that are already registered.
  String existing = conf.get(DFSConfigKeys.DFS_NAMESERVICES);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES,
      existing == null ? logicalName : existing + "," + logicalName);

  conf.set(DFSUtil.addKeySuffixes(
      HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, logicalName),
      "nn1,nn2");
  conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
      "." + logicalName,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:19,代码来源:DFSTestUtil.java

示例4: testFailureWithMisconfiguredHaNNs

import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; //导入依赖的package包/类
/**
 * Ensures the client surfaces a helpful error when a proxy provider is
 * configured for a logical URI that has no NameNode addresses behind it.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  final String logicalHostName = "misconfigured-ha-uri";
  Configuration haConf = new Configuration();
  // Only the proxy provider is set; the address keys are intentionally absent.
  haConf.set(
      HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());

  URI logicalUri = new URI("hdfs://" + logicalHostName + "/test");
  try {
    FileSystem.get(logicalUri, haConf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    boolean mentionsUri = StringUtils.stringifyException(ioe).contains(
        "Could not find any configured addresses for URI " + logicalUri);
    assertTrue("expected exception did not contain helpful message", mentionsUri);
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:24,代码来源:TestDFSClientFailover.java

示例5: initHAConf

import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; //导入依赖的package包/类
/**
 * Fills in {@code conf} for a two-NameNode HA deployment sharing edits via
 * {@code journalURI}: loopback RPC endpoints on the fixed IPC ports, the
 * nameservice/NameNode declarations, the failover proxy provider, and a
 * default filesystem of the logical nameservice URI.
 */
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());

  // Fixed, pre-assigned IPC ports for the two NameNodes.
  final String firstNnAddr = "127.0.0.1:" + NN1_IPC_PORT;
  final String secondNnAddr = "127.0.0.1:" + NN2_IPC_PORT;
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), firstNnAddr);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), secondNnAddr);

  // Logical nameservice and its two member NameNodes.
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);

  return conf;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:20,代码来源:MiniQJMHACluster.java

示例6: createWebHDFSHAConfiguration

import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; //导入依赖的package包/类
/**
 * Builds a WebHDFS HA configuration for nameservice "ns1" with two
 * NameNode HTTP endpoints and the configured failover proxy provider
 * registered under {@code logicalHostName}.
 */
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();

  // Nameservice "ns1" with NameNodes nn1/nn2 and their HTTP addresses.
  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"),
      nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"),
      nnaddr2);

  // Failover proxy provider keyed by the caller-supplied logical host name.
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:TestDFSUtil.java

示例7: setFederatedHAConfiguration

import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; //导入依赖的package包/类
/**
 * Derives a federated-HA client configuration from a running
 * {@link MiniDFSCluster}: groups the cluster's NameNodes by nameservice,
 * publishes each NameNode's RPC and service-RPC addresses, and wires each
 * nameservice to the configured failover proxy provider.
 */
public static void setFederatedHAConfiguration(MiniDFSCluster cluster,
    Configuration conf) {
  Map<String, List<String>> nameservices = Maps.newHashMap();
  for (NameNodeInfo info : cluster.getNameNodeInfos()) {
    Preconditions.checkState(info.nameserviceId != null);
    // Collect NameNode ids per nameservice.
    List<String> nnIds = nameservices.get(info.nameserviceId);
    if (nnIds == null) {
      nnIds = Lists.newArrayList();
      nameservices.put(info.nameserviceId, nnIds);
    }
    nnIds.add(info.nnId);

    // Same hdfs:// URI serves as both RPC and service-RPC address.
    String rpcUri = DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
        info.nameNode.getNameNodeAddress()).toString();
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
        info.nameserviceId, info.nnId), rpcUri);
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        info.nameserviceId, info.nnId), rpcUri);
  }
  for (Map.Entry<String, List<String>> entry : nameservices.entrySet()) {
    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,
        entry.getKey()), Joiner.on(",").join(entry.getValue()));
    conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "."
        + entry.getKey(), ConfiguredFailoverProxyProvider.class.getName());
  }
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, Joiner.on(",")
      .join(nameservices.keySet()));
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:31,代码来源:DFSTestUtil.java

示例8: createWebHDFSHAConfiguration

import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; //导入依赖的package包/类
/**
 * Creates a WebHDFS HA configuration: nameservice "ns1" with NameNodes
 * nn1/nn2 at the given HTTP addresses, and the configured failover proxy
 * provider bound to {@code logicalHostName}.
 */
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();

  // Declare the nameservice and its two NameNode HTTP endpoints.
  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"),
      nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"),
      nnaddr2);

  // Clients reaching logicalHostName fail over via this provider.
  conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:15,代码来源:TestDFSUtil.java

示例9: initClientHAConf

import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; //导入依赖的package包/类
/**
 * Writes a client-side HA configuration file for logical nameservice "ns1"
 * with NameNodes nn1/nn2 on the given loopback ports, and points
 * {@code hsf} at it via {@code setConfigFile}.
 *
 * NOTE(review): the rpc-address values embed an "hdfs://" scheme prefix,
 * whereas dfs.namenode.rpc-address is conventionally host:port — confirm
 * this is intentional for this test harness.
 */
private void initClientHAConf(int nn1port, int nn2port) throws Exception {
  hsf.setHomeDir("test-case");
  // Clients address the cluster through the logical nameservice URI.
  hsf.setNameNodeURL("hdfs://ns1");
  File confFile = new File(getName());
  // Hand-built hdfs-site-style XML; trailing spaces pad each line.
  String conf = "<configuration>\n             "
      + "  <property>\n                                    "
      + "    <name>dfs.nameservices</name>\n               "
      + "    <value>ns1</value>\n                          "
      + "  </property>\n                                   "
      + "  <property>\n                                    "
      + "    <name>dfs.ha.namenodes.ns1</name>\n           "
      + "    <value>nn1,nn2</value>\n                      "
      + "  </property>\n                                   "
      + "  <property>\n                                    "
      + "    <name>dfs.namenode.rpc-address.ns1.nn1</name>\n"
      + "    <value>hdfs://127.0.0.1:" + nn1port + "</value>\n"
      + "  </property>\n                                   "
      + "  <property>\n                                    "
      + "    <name>dfs.namenode.rpc-address.ns1.nn2</name>\n"
      + "    <value>hdfs://127.0.0.1:" + nn2port + "</value>\n"
      + "  </property>\n                                   "
      + "  <property>\n                                    "
      + "    <name>dfs.client.failover.proxy.provider.ns1</name>\n"
      + "    <value>" + ConfiguredFailoverProxyProvider.class.getName() + "</value>\n"
      + "  </property>\n                                   "
      + "</configuration>";
  setConfigFile(hsf, confFile, conf);
}
 
开发者ID:gemxd,项目名称:gemfirexd-oss,代码行数:29,代码来源:HdfsSortedOplogOrganizerJUnitTest.java

示例10: testLogicalUriShouldNotHavePorts

import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; //导入依赖的package包/类
/**
 * Regression test for HDFS-2683: a logical (HA) URI must not carry a port,
 * and using one should fail with a message saying ports are not used.
 */
@Test
public void testLogicalUriShouldNotHavePorts() {
  Configuration haConf = new HdfsConfiguration();
  haConf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + ".foo",
      ConfiguredFailoverProxyProvider.class.getName());

  // Logical host "foo" with an (illegal) explicit port.
  Path logicalPath = new Path("hdfs://foo:12345/");
  try {
    logicalPath.getFileSystem(haConf).exists(logicalPath);
    fail("Did not fail with fake FS");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "does not use port information", ioe);
  }
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:18,代码来源:TestDFSClientFailover.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。