This page collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider. If you have been wondering what ConfiguredFailoverProxyProvider is for and how to use it, the curated class examples below may help. In short, it is the client-side failover proxy provider that resolves a logical HA nameservice to the NameNode addresses listed in the configuration, letting the client fail over between them.
ConfiguredFailoverProxyProvider belongs to the org.apache.hadoop.hdfs.server.namenode.ha package. Ten code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java examples.
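Before the examples, here is a minimal, self-contained sketch of the client-side HA setup they all revolve around. It is not taken from any example below: the nameservice "ns1", the NameNode IDs, and the host names and ports are placeholders, and it assumes two reachable NameNodes. The string keys are the literal forms of constants such as DFS_NAMESERVICES and DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX used in the examples.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;

public class HaClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Declare the logical nameservice and its two NameNode IDs.
    conf.set("dfs.nameservices", "ns1");
    conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
    // RPC address of each NameNode (placeholder hosts and ports).
    conf.set("dfs.namenode.rpc-address.ns1.nn1", "nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.ns1.nn2", "nn2.example.com:8020");
    // Fail over between the addresses configured above.
    conf.set("dfs.client.failover.proxy.provider.ns1",
        ConfiguredFailoverProxyProvider.class.getName());
    // Clients address the logical nameservice, never a concrete host:port.
    FileSystem fs = FileSystem.get(new URI("hdfs://ns1"), conf);
    System.out.println(fs.exists(new Path("/")));
  }
}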
Example 1: testFailureWithMisconfiguredHaNNs
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; // import the required package/class
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());
  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
            "Could not find any configured addresses for URI " + uri));
  }
}
Example 2: initHAConf
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; // import the required package/class
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());
  String address1 = "127.0.0.1:" + basePort;
  String address2 = "127.0.0.1:" + (basePort + 2);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), address2);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);
  return conf;
}
Example 3: addHAConfiguration
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; // import the required package/class
/**
 * Add a new HA configuration.
 */
public static void addHAConfiguration(Configuration conf,
    final String logicalName) {
  String nsIds = conf.get(DFSConfigKeys.DFS_NAMESERVICES);
  if (nsIds == null) {
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
  } else { // append the nsid
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsIds + "," + logicalName);
  }
  conf.set(DFSUtil.addKeySuffixes(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
      logicalName), "nn1,nn2");
  conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
      "." + logicalName,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
}
Example 4: testFailureWithMisconfiguredHaNNs
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; // import the required package/class
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test
public void testFailureWithMisconfiguredHaNNs() throws Exception {
  String logicalHost = "misconfigured-ha-uri";
  Configuration conf = new Configuration();
  conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost,
      ConfiguredFailoverProxyProvider.class.getName());
  URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    assertTrue("expected exception did not contain helpful message",
        StringUtils.stringifyException(ioe).contains(
            "Could not find any configured addresses for URI " + uri));
  }
}
Example 5: initHAConf
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; // import the required package/class
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());
  String address1 = "127.0.0.1:" + NN1_IPC_PORT;
  String address2 = "127.0.0.1:" + NN2_IPC_PORT;
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), address2);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);
  return conf;
}
Example 6: createWebHDFSHAConfiguration
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; // import the required package/class
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
Example 7: setFederatedHAConfiguration
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; // import the required package/class
public static void setFederatedHAConfiguration(MiniDFSCluster cluster,
    Configuration conf) {
  Map<String, List<String>> nameservices = Maps.newHashMap();
  for (NameNodeInfo info : cluster.getNameNodeInfos()) {
    Preconditions.checkState(info.nameserviceId != null);
    List<String> nns = nameservices.get(info.nameserviceId);
    if (nns == null) {
      nns = Lists.newArrayList();
      nameservices.put(info.nameserviceId, nns);
    }
    nns.add(info.nnId);
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
        info.nameserviceId, info.nnId),
        DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
            info.nameNode.getNameNodeAddress()).toString());
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        info.nameserviceId, info.nnId),
        DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
            info.nameNode.getNameNodeAddress()).toString());
  }
  for (Map.Entry<String, List<String>> entry : nameservices.entrySet()) {
    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,
        entry.getKey()), Joiner.on(",").join(entry.getValue()));
    conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "."
        + entry.getKey(), ConfiguredFailoverProxyProvider.class.getName());
  }
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, Joiner.on(",")
      .join(nameservices.keySet()));
}
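Note how the second loop configures the proxy provider once per nameservice: in a federated HA cluster, every logical nameservice carries its own dfs.client.failover.proxy.provider.&lt;nameservice&gt; key, each pointing at ConfiguredFailoverProxyProvider independently.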
Example 8: createWebHDFSHAConfiguration
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; // import the required package/class
private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
  conf.set(DFSUtil.addKeySuffixes(
      DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);
  conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
      ConfiguredFailoverProxyProvider.class.getName());
  return conf;
}
Example 9: initClientHAConf
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; // import the required package/class
private void initClientHAConf(int nn1port, int nn2port) throws Exception {
  hsf.setHomeDir("test-case");
  hsf.setNameNodeURL("hdfs://ns1");
  File confFile = new File(getName());
  String conf = "<configuration>\n"
      + "  <property>\n"
      + "    <name>dfs.nameservices</name>\n"
      + "    <value>ns1</value>\n"
      + "  </property>\n"
      + "  <property>\n"
      + "    <name>dfs.ha.namenodes.ns1</name>\n"
      + "    <value>nn1,nn2</value>\n"
      + "  </property>\n"
      + "  <property>\n"
      + "    <name>dfs.namenode.rpc-address.ns1.nn1</name>\n"
      + "    <value>hdfs://127.0.0.1:" + nn1port + "</value>\n"
      + "  </property>\n"
      + "  <property>\n"
      + "    <name>dfs.namenode.rpc-address.ns1.nn2</name>\n"
      + "    <value>hdfs://127.0.0.1:" + nn2port + "</value>\n"
      + "  </property>\n"
      + "  <property>\n"
      + "    <name>dfs.client.failover.proxy.provider.ns1</name>\n"
      + "    <value>" + ConfiguredFailoverProxyProvider.class.getName() + "</value>\n"
      + "  </property>\n"
      + "</configuration>";
  setConfigFile(hsf, confFile, conf);
}
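The XML assembled above mirrors what would normally live in hdfs-site.xml: dfs.nameservices, dfs.ha.namenodes.ns1, dfs.namenode.rpc-address.ns1.nn1/nn2, and dfs.client.failover.proxy.provider.ns1 are the literal key names behind the constants (DFS_NAMESERVICES, DFS_HA_NAMENODES_KEY_PREFIX, and so on) that the other examples use.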
Example 10: testLogicalUriShouldNotHavePorts
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; // import the required package/class
/**
 * Regression test for HDFS-2683.
 */
@Test
public void testLogicalUriShouldNotHavePorts() {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + ".foo",
      ConfiguredFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://foo:12345/");
  try {
    p.getFileSystem(conf).exists(p);
    fail("Did not fail with fake FS");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains(
        "does not use port information", ioe);
  }
}
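The takeaway from this regression test: a logical HA authority is resolved entirely through the proxy provider's configured addresses, so the logical URI itself must not carry a port. A URI like hdfs://foo/ would be accepted here, while hdfs://foo:12345/ is rejected with the "does not use port information" error.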