This article compiles typical usage examples of the Java method org.apache.hadoop.net.DNS.getDefaultHost. If you are wondering what exactly DNS.getDefaultHost does and how to use it, or are looking for sample code, the curated method examples below may help. You can also explore the enclosing class, org.apache.hadoop.net.DNS, in more detail.
The following presents 15 code examples of DNS.getDefaultHost, sorted by popularity by default. You can upvote any example you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: getLocalHostName
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
/**
* Retrieve the name of the current host. Multihomed hosts may restrict the
* hostname lookup to a specific interface and nameserver with {@link
* org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_SECURITY_DNS_INTERFACE_KEY}
* and {@link org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_SECURITY_DNS_NAMESERVER_KEY}
*
* @param conf Configuration object. May be null.
* @return the hostname of the current host
* @throws UnknownHostException if the hostname cannot be resolved
*/
static String getLocalHostName(@Nullable Configuration conf)
throws UnknownHostException {
if (conf != null) {
String dnsInterface = conf.get(HADOOP_SECURITY_DNS_INTERFACE_KEY);
String nameServer = conf.get(HADOOP_SECURITY_DNS_NAMESERVER_KEY);
if (dnsInterface != null) {
return DNS.getDefaultHost(dnsInterface, nameServer, true);
} else if (nameServer != null) {
throw new IllegalArgumentException(HADOOP_SECURITY_DNS_NAMESERVER_KEY +
" requires " + HADOOP_SECURITY_DNS_INTERFACE_KEY + ". Check your" +
"configuration.");
}
}
// Fallback to querying the default hostname as we did before.
return InetAddress.getLocalHost().getCanonicalHostName();
}
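
To exercise the interface-restricted lookup described in the Javadoc above, here is a minimal sketch. The interface name "eth1" and nameserver "10.0.0.53" are hypothetical values chosen for illustration, not part of the original example:

import java.net.UnknownHostException;
import org.apache.hadoop.net.DNS;

public class MultihomedLookupDemo {
    public static void main(String[] args) throws UnknownHostException {
        // Restrict the lookup to one interface and nameserver; the third
        // argument enables falling back to the system's default host
        // resolution if the DNS-based lookup fails.
        // "eth1" and "10.0.0.53" are hypothetical values.
        String host = DNS.getDefaultHost("eth1", "10.0.0.53", true);
        System.out.println("Resolved host: " + host);
    }
}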
Example 2: init
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
@Override
public void init(String contextName, ContextFactory factory) {
super.init(contextName, factory);
LOG.debug("Initializing the GangliaContext31 for Ganglia 3.1 metrics.");
// Take the hostname from the DNS class.
Configuration conf = new Configuration();
if (conf.get("slave.host.name") != null) {
hostName = conf.get("slave.host.name");
} else {
try {
hostName = DNS.getDefaultHost(
conf.get("dfs.datanode.dns.interface","default"),
conf.get("dfs.datanode.dns.nameserver","default"));
} catch (UnknownHostException uhe) {
LOG.error(uhe);
hostName = "UNKNOWN.example.com";
}
}
}
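
As the pattern above shows, an explicit slave.host.name setting takes precedence over the DNS lookup. A minimal sketch of pinning the reported hostname follows; the value metrics-host.example.com is a hypothetical placeholder:

import org.apache.hadoop.conf.Configuration;

public class HostNameOverrideDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // With this key set, init() above never calls DNS.getDefaultHost.
        // The hostname value here is hypothetical.
        conf.set("slave.host.name", "metrics-host.example.com");
        System.out.println(conf.get("slave.host.name"));
    }
}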
Example 3: register
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
DataNode.generateUuid(), getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo),
new ExportedBlockKeys(), VersionInfo.getVersion());
// register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
//first block reports
storage = new DatanodeStorage(DatanodeStorage.generateUuid());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
};
nameNodeProto.blockReport(dnRegistration,
nameNode.getNamesystem().getBlockPoolId(), reports,
new BlockReportContext(1, 0, System.nanoTime()));
}
Example 4: init
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
public void init(String contextName, ContextFactory factory) {
super.init(contextName, factory);
LOG.debug("Initializing the GangliaContext31 for Ganglia 3.1 metrics.");
// Take the hostname from the DNS class.
Configuration conf = new Configuration();
if (conf.get("slave.host.name") != null) {
hostName = conf.get("slave.host.name");
} else {
try {
hostName = DNS.getDefaultHost(
conf.get("dfs.datanode.dns.interface","default"),
conf.get("dfs.datanode.dns.nameserver","default"));
} catch (UnknownHostException uhe) {
LOG.error(uhe);
hostName = "UNKNOWN.example.com";
}
}
}
Example 5: getHostName
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
/**
* Returns the hostname for this datanode. If the hostname is not
* explicitly configured in the given config, then it is determined
* via the DNS class.
*
* @param config configuration
* @return the hostname (NB: may not be a FQDN)
* @throws UnknownHostException if the dfs.datanode.dns.interface
* option is used and the hostname cannot be determined
*/
private static String getHostName(Configuration config)
throws UnknownHostException {
String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
if (name == null) {
String dnsInterface = config.get(
CommonConfigurationKeys.HADOOP_SECURITY_DNS_INTERFACE_KEY);
String nameServer = config.get(
CommonConfigurationKeys.HADOOP_SECURITY_DNS_NAMESERVER_KEY);
boolean fallbackToHosts = false;
if (dnsInterface == null) {
// Try the legacy configuration keys.
dnsInterface = config.get(DFS_DATANODE_DNS_INTERFACE_KEY);
nameServer = config.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
} else {
// If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
// resolution if DNS fails. We will not use hosts file resolution
// by default to avoid breaking existing clusters.
fallbackToHosts = true;
}
name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts);
}
return name;
}
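
The precedence this method implements can be summarized: an explicit dfs.datanode.hostname wins; otherwise the newer hadoop.security.dns.* keys are consulted, which also enables hosts-file fallback; only if those are unset do the legacy dfs.datanode.dns.* keys apply. A minimal sketch of that decision, with "eth0" as a hypothetical interface name:

import org.apache.hadoop.conf.Configuration;

public class DnsKeyPrecedenceDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical setting: use the newer security key.
        conf.set("hadoop.security.dns.interface", "eth0");

        String dnsInterface = conf.get("hadoop.security.dns.interface");
        // Hosts-file fallback is enabled only on the security-key path.
        boolean fallbackToHosts = dnsInterface != null;
        if (dnsInterface == null) {
            // Legacy key, consulted only when the security key is unset.
            dnsInterface = conf.get("dfs.datanode.dns.interface");
        }
        System.out.println("interface=" + dnsInterface
            + ", hostsFileFallback=" + fallbackToHosts);
    }
}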
Example 6: register
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
DataNode.generateUuid(), getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo),
new ExportedBlockKeys(), VersionInfo.getVersion());
// register datanode
dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
dnRegistration.setNamespaceInfo(nsInfo);
//first block reports
storage = new DatanodeStorage(DatanodeStorage.generateUuid());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
};
dataNodeProto.blockReport(dnRegistration, bpid, reports,
new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
Example 7: register
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
DataNode.generateUuid(), getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo),
new ExportedBlockKeys(), VersionInfo.getVersion());
// register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
//first block reports
storage = new DatanodeStorage(DatanodeStorage.generateUuid());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage,
new BlockListAsLongs(null, null).getBlockListAsLongs())
};
nameNodeProto.blockReport(dnRegistration,
nameNode.getNamesystem().getBlockPoolId(), reports,
new BlockReportContext(1, 0, System.nanoTime()));
}
Example 8: register
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
"", getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo, ""),
new ExportedBlockKeys(), VersionInfo.getVersion());
DataNode.setNewStorageID(dnRegistration);
// register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
//first block reports
storage = new DatanodeStorage(dnRegistration.getStorageID());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage,
new BlockListAsLongs(null, null).getBlockListAsLongs())
};
nameNodeProto.blockReport(dnRegistration,
nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 9: register
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"), "", getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo, ""), new ExportedBlockKeys(),
VersionInfo.getVersion());
DataNode.setNewStorageID(dnRegistration);
// register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
//first block reports
storage = new DatanodeStorage(dnRegistration.getStorageID());
final StorageBlockReport[] reports = {new StorageBlockReport(storage,
BlockReport.builder(NUM_BUCKETS).build())};
nameNodeProto.blockReport(dnRegistration,
nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 10: Standby
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
Standby(AvatarNode avatarNode, Configuration startupConf, Configuration conf)
throws IOException {
this.running = true;
this.avatarNode = avatarNode;
this.confg = conf;
this.startupConf = startupConf;
this.fsImage = avatarNode.getFSImage();
this.fsnamesys = avatarNode.getNamesystem();
this.sleepBetweenErrors = startupConf.getInt("hdfs.avatarnode.sleep", 5000);
initSecondary(startupConf); // start webserver for secondary namenode
this.machineName =
DNS.getDefaultHost(conf.get("dfs.namenode.dns.interface","default"),
conf.get("dfs.namenode.dns.nameserver","default"));
LOG.info("machineName=" + machineName);
this.editsFile = this.avatarNode.getRemoteEditsFile(conf);
this.editsFileNew = this.avatarNode.getRemoteEditsFileNew(conf);
InetSocketAddress addr = NameNode.getAddress(conf);
this.tmpImageFileForValidation = new File("/tmp",
"hadoop_image." + addr.getHostName() + ":" + addr.getPort());
checkpointStatus("No checkpoint initiated");
}
Example 11: register
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
void register() throws IOException {
// get versions from the namenode
nsInfo = nameNodeProto.versionRequest();
dnRegistration = new DatanodeRegistration(
new DatanodeID(DNS.getDefaultIP("default"),
DNS.getDefaultHost("default", "default"),
"", getNodePort(dnIdx),
DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
new DataStorage(nsInfo, ""),
new ExportedBlockKeys(), VersionInfo.getVersion());
DataNode.setNewStorageID(dnRegistration);
// register datanode
dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
//first block reports
storage = new DatanodeStorage(dnRegistration.getStorageID());
final StorageBlockReport[] reports = {
new StorageBlockReport(storage,
new BlockListAsLongs(null, null).getBlockListAsLongs())
};
nameNodeProto.blockReport(dnRegistration,
nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 12: createRpcProgramNfs3
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config,
DatagramSocket registrationSocket, boolean allowInsecurePorts)
throws IOException {
DefaultMetricsSystem.initialize("Nfs3");
String displayName = DNS.getDefaultHost("default", "default")
+ config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
metrics = Nfs3Metrics.create(config, displayName);
return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts);
}
Example 13: getHostName
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
/**
* Returns the hostname for this datanode. If the hostname is not
* explicitly configured in the given config, then it is determined
* via the DNS class.
*
* @param config configuration
* @return the hostname (NB: may not be a FQDN)
* @throws UnknownHostException if the dfs.datanode.dns.interface
* option is used and the hostname cannot be determined
*/
private static String getHostName(Configuration config)
throws UnknownHostException {
String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
if (name == null) {
name = DNS.getDefaultHost(
config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
DFS_DATANODE_DNS_INTERFACE_DEFAULT),
config.get(DFS_DATANODE_DNS_NAMESERVER_KEY,
DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
}
return name;
}
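
In this older variant an explicitly configured dfs.datanode.hostname still short-circuits DNS resolution entirely. A minimal sketch; dn1.example.com is a hypothetical value:

import org.apache.hadoop.conf.Configuration;

public class DatanodeHostNameDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // With this key set, getHostName() above returns it directly and
        // DNS.getDefaultHost is never called. The value is hypothetical.
        conf.set("dfs.datanode.hostname", "dn1.example.com");
        System.out.println(conf.get("dfs.datanode.hostname"));
    }
}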
Example 14: init
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
public void init(SubsetConfiguration conf) {
LOG.debug("Initializing the GangliaSink for Ganglia metrics.");
this.conf = conf;
// Take the hostname from the DNS class.
if (conf.getString("slave.host.name") != null) {
hostName = conf.getString("slave.host.name");
} else {
try {
hostName = DNS.getDefaultHost(
conf.getString("dfs.datanode.dns.interface", "default"),
conf.getString("dfs.datanode.dns.nameserver", "default"));
} catch (UnknownHostException uhe) {
LOG.error(uhe);
hostName = "UNKNOWN.example.com";
}
}
// load the ganglia servers from properties
metricsServers = Servers.parse(conf.getString(SERVERS_PROPERTY),
DEFAULT_PORT);
// extract the Ganglia conf per metrics
gangliaConfMap = new HashMap<String, GangliaConf>();
loadGangliaConf(GangliaConfType.units);
loadGangliaConf(GangliaConfType.tmax);
loadGangliaConf(GangliaConfType.dmax);
loadGangliaConf(GangliaConfType.slope);
try {
datagramSocket = new DatagramSocket();
} catch (SocketException se) {
LOG.error(se);
}
// see if sparseMetrics is supported. Default is false
supportSparseMetrics = conf.getBoolean(SUPPORT_SPARSE_METRICS_PROPERTY,
SUPPORT_SPARSE_METRICS_DEFAULT);
}
Example 15: getNodeName
import org.apache.hadoop.net.DNS; // import the package/class this method depends on
/**
* Get the data-node name in the form <host name>:<port>, where the port
* is a six-digit integer. This is necessary in order to provide
* lexicographic ordering: host names are all the same, so the ordering
* goes by port numbers.
*/
private static String getNodeName(int port) throws IOException {
String machineName = DNS.getDefaultHost("default", "default");
String sPort = String.valueOf(100000 + port);
if (sPort.length() > 6)
throw new IOException("Too many data-nodes.");
return machineName + ":" + sPort;
}
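
The fixed-width encoding is easy to verify: adding 100000 to any valid port (at most 65535) always yields a six-digit number, so lexicographic comparison of the resulting names agrees with numeric comparison of the ports. A minimal sketch, using a hypothetical host name:

public class NodeNameOrderDemo {
    public static void main(String[] args) {
        // 9866 -> "109866", 50010 -> "150010", 50020 -> "150020":
        // equal width, so string order equals numeric port order.
        for (int port : new int[] {9866, 50010, 50020}) {
            System.out.println("host.example.com:" + (100000 + port));
        }
    }
}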