This article collects typical usage examples of the Java method org.apache.hadoop.net.NetUtils.createSocketAddr. If you are wondering how NetUtils.createSocketAddr works in practice, how to call it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.net.NetUtils.
The following shows 15 code examples of NetUtils.createSocketAddr, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
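Before walking through the examples, here is a minimal standalone sketch of my own (not one of the 15 examples below) showing the three createSocketAddr overloads they rely on; the host name, port, and configuration key are hypothetical.

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class CreateSocketAddrSketch {
  public static void main(String[] args) {
    // Single-argument form: the target is expected to include both host and port.
    InetSocketAddress a = NetUtils.createSocketAddr("nn1.example.com:8020");

    // Two-argument form: the default port is used when the target omits one.
    InetSocketAddress b = NetUtils.createSocketAddr("nn1.example.com", 8020);

    // Three-argument form: the configuration key the address came from,
    // which shows up in error messages if the value cannot be parsed.
    InetSocketAddress c = NetUtils.createSocketAddr(
        "nn1.example.com:8020", 8020, "dfs.namenode.rpc-address");

    System.out.println(a + " " + b + " " + c);
  }
}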
Example 1: start
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
public void start(Configuration conf) {
  YarnRPC rpc = YarnRPC.create(conf);
  // TODO: use fixed port ??
  InetSocketAddress address = NetUtils.createSocketAddr(hostAddress);
  InetAddress hostNameResolved = null;
  try {
    address.getAddress();
    hostNameResolved = InetAddress.getLocalHost();
  } catch (UnknownHostException e) {
    throw new YarnRuntimeException(e);
  }
  server = rpc.getServer(protocol, this, address, conf, null, 1);
  server.start();
  this.bindAddress = NetUtils.getConnectAddress(server);
  super.start();
  amRunning = true;
}
Example 2: start
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
void start() throws IOException {
  final InetSocketAddress httpAddr = getAddress(conf);
  final String httpsAddrString = conf.get(
      DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "journal",
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);
  httpServer = builder.build();
  httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode);
  httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
  httpServer.addInternalServlet("getJournal", "/getJournal",
      GetJournalEditServlet.class, true);
  httpServer.start();
}
Example 3: getAddressesForNameserviceId
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
static Map<String, InetSocketAddress> getAddressesForNameserviceId(
    Configuration conf, String nsId, String defaultValue, String... keys) {
  Collection<String> nnIds = getNameNodeIds(conf, nsId);
  Map<String, InetSocketAddress> ret = Maps.newHashMap();
  for (String nnId : emptyAsSingletonNull(nnIds)) {
    String suffix = concatSuffixes(nsId, nnId);
    String address = getConfValue(defaultValue, suffix, conf, keys);
    if (address != null) {
      InetSocketAddress isa = NetUtils.createSocketAddr(address);
      if (isa.isUnresolved()) {
        LOG.warn("Namenode for {} remains unresolved for ID {}. Check your "
            + "hdfs-site.xml file to ensure namenodes are configured "
            + "properly.", nsId, nnId);
      }
      ret.put(nnId, isa);
    }
  }
  return ret;
}
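A side note on the isUnresolved() check above: in my experience NetUtils.createSocketAddr returns an unresolved InetSocketAddress rather than throwing when the DNS lookup fails, so callers are expected to detect that themselves, as Example 3 does. A minimal sketch of my own, using a hypothetical host name that will not resolve:

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class UnresolvedCheckSketch {
  public static void main(String[] args) {
    // "bogus.invalid" is a hypothetical host that cannot be resolved.
    InetSocketAddress isa = NetUtils.createSocketAddr("bogus.invalid:8485");
    if (isa.isUnresolved()) {
      System.err.println("Could not resolve " + isa.getHostName()
          + "; check the addresses configured in hdfs-site.xml.");
    }
  }
}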
Example 4: newProxy
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Private
@VisibleForTesting
protected ContainerManagementProtocol newProxy(final YarnRPC rpc,
    String containerManagerBindAddr, ContainerId containerId, Token token)
    throws InvalidToken {
  if (token == null) {
    throw new InvalidToken("No NMToken sent for " + containerManagerBindAddr);
  }
  final InetSocketAddress cmAddr =
      NetUtils.createSocketAddr(containerManagerBindAddr);
  LOG.info("Opening proxy : " + containerManagerBindAddr);
  // the user in createRemoteUser in this context has to be ContainerID
  UserGroupInformation user =
      UserGroupInformation.createRemoteUser(containerId
          .getApplicationAttemptId().toString());
  org.apache.hadoop.security.token.Token<NMTokenIdentifier> nmToken =
      ConverterUtils.convertFromYarn(token, cmAddr);
  user.addToken(nmToken);
  return NMProxy.createNMProxy(conf, ContainerManagementProtocol.class,
      user, rpc, cmAddr);
}
Example 5: start
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
void start() throws IOException {
  final InetSocketAddress httpAddr = getHttpAddress(conf);
  final String httpsAddrString = conf.get(
      NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY,
      NfsConfigKeys.NFS_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "nfs3",
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY,
      NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY);
  this.httpServer = builder.build();
  this.httpServer.start();
  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    infoPort = httpServer.getConnectorAddress(connIdx++).getPort();
  }
  if (policy.isHttpsEnabled()) {
    infoSecurePort = httpServer.getConnectorAddress(connIdx).getPort();
  }
}
Example 6: createSocketForPipeline
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Create a socket for a write pipeline
 * @param first the first datanode
 * @param length the pipeline length
 * @param client client
 * @return the socket connected to the first datanode
 */
static Socket createSocketForPipeline(final DatanodeInfo first,
    final int length, final DFSClient client) throws IOException {
  final String dnAddr = first.getXferAddr(
      client.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
  final Socket sock = client.socketFactory.createSocket();
  final int timeout = client.getDatanodeReadTimeout(length);
  NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(),
      client.getConf().socketTimeout);
  sock.setSoTimeout(timeout);
  sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
  }
  return sock;
}
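Stripped of the DFSClient plumbing, the pattern in Example 6 is: resolve the datanode transfer address with createSocketAddr, connect with a timeout through NetUtils, then tune the socket. A minimal sketch of my own under those assumptions; the address, timeouts, and buffer size below are illustrative, not DFSClient's real configuration.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import org.apache.hadoop.net.NetUtils;

public class PipelineSocketSketch {
  // Opens a TCP connection to a transfer address such as
  // "dn1.example.com:50010" (hypothetical).
  public static Socket openDataSocket(String dnAddr) throws IOException {
    InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
    Socket sock = new Socket();
    NetUtils.connect(sock, isa, 60_000); // connect timeout in ms
    sock.setSoTimeout(60_000);           // read timeout in ms
    sock.setSendBufferSize(128 * 1024);  // hypothetical send buffer size
    return sock;
  }
}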
Example 7: substituteForWildcardAddress
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Substitute a default host in the case that an address has been configured
 * with a wildcard. This is used, for example, when determining the HTTP
 * address of the NN -- if it's configured to bind to 0.0.0.0, we want to
 * substitute the hostname from the filesystem URI rather than trying to
 * connect to 0.0.0.0.
 * @param configuredAddress the address found in the configuration
 * @param defaultHost the host to substitute with, if configuredAddress
 *        is a local/wildcard address.
 * @return the substituted address
 * @throws IOException if it is a wildcard address and security is enabled
 */
@VisibleForTesting
static String substituteForWildcardAddress(String configuredAddress,
    String defaultHost) throws IOException {
  InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
  InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
      + ":0");
  final InetAddress addr = sockAddr.getAddress();
  if (addr != null && addr.isAnyLocalAddress()) {
    if (UserGroupInformation.isSecurityEnabled() &&
        defaultSockAddr.getAddress().isAnyLocalAddress()) {
      throw new IOException("Cannot use a wildcard address with security. " +
          "Must explicitly set bind address for Kerberos");
    }
    return defaultHost + ":" + sockAddr.getPort();
  } else {
    return configuredAddress;
  }
}
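To make the wildcard substitution concrete: following the logic above, a configured address of "0.0.0.0:50070" with a default host of "nn1.example.com" should come back as "nn1.example.com:50070", while a concrete address is returned unchanged. A small sketch of my own that mirrors only the address check (host names, ports, and the class name are hypothetical; the security check is omitted):

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class WildcardSubstitutionSketch {
  static String substitute(String configuredAddress, String defaultHost) {
    InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
    // Wildcard (0.0.0.0 or ::) binds get the default host swapped in.
    if (sockAddr.getAddress() != null && sockAddr.getAddress().isAnyLocalAddress()) {
      return defaultHost + ":" + sockAddr.getPort();
    }
    return configuredAddress;
  }

  public static void main(String[] args) {
    System.out.println(substitute("0.0.0.0:50070", "nn1.example.com"));       // nn1.example.com:50070
    System.out.println(substitute("nn2.example.com:50070", "nn1.example.com")); // unchanged
  }
}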
Example 8: buildDTServiceName
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * create the service name for a Delegation token
 * @param uri of the service
 * @param defPort is used if the uri lacks a port
 * @return the token service, or null if no authority
 * @see #buildTokenService(InetSocketAddress)
 */
public static String buildDTServiceName(URI uri, int defPort) {
  String authority = uri.getAuthority();
  if (authority == null) {
    return null;
  }
  InetSocketAddress addr = NetUtils.createSocketAddr(authority, defPort);
  return buildTokenService(addr).toString();
}
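The interesting step here is the two-argument overload: the URI authority may or may not carry an explicit port, and defPort fills the gap. A small sketch of my own showing just that step (URIs and ports are hypothetical):

import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.net.NetUtils;

public class DTServiceNameSketch {
  public static void main(String[] args) {
    URI withPort = URI.create("hdfs://nn1.example.com:9000");
    URI withoutPort = URI.create("hdfs://nn1.example.com");

    // An explicit port in the authority wins; otherwise defPort (8020 here) is used.
    InetSocketAddress a = NetUtils.createSocketAddr(withPort.getAuthority(), 8020);
    InetSocketAddress b = NetUtils.createSocketAddr(withoutPort.getAuthority(), 8020);

    System.out.println(a.getPort()); // 9000
    System.out.println(b.getPort()); // 8020
  }
}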
Example 9: getLifelineRpcServerAddress
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Given a configuration get the address of the lifeline RPC server.
 * If the lifeline RPC is not configured returns null.
 *
 * @param conf configuration
 * @return address or null
 */
InetSocketAddress getLifelineRpcServerAddress(Configuration conf) {
  String addr = getTrimmedOrNull(conf, DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY);
  if (addr == null) {
    return null;
  }
  return NetUtils.createSocketAddr(addr);
}
Example 10: getApplicationWebURLOnJHSWithoutScheme
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
public static String getApplicationWebURLOnJHSWithoutScheme(Configuration conf,
    ApplicationId appId) throws UnknownHostException {
  // construct the history url for job
  String addr = getJHSWebappURLWithoutScheme(conf);
  Iterator<String> it = ADDR_SPLITTER.split(addr).iterator();
  it.next(); // ignore the bind host
  String port = it.next();
  // Use hs address to figure out the host for webapp
  addr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS,
      JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
  String host = ADDR_SPLITTER.split(addr).iterator().next();
  String hsAddress = JOINER.join(host, ":", port);
  InetSocketAddress address = NetUtils.createSocketAddr(
      hsAddress, getDefaultJHSWebappPort(),
      getDefaultJHSWebappURLWithoutScheme());
  StringBuffer sb = new StringBuffer();
  if (address.getAddress().isAnyLocalAddress() ||
      address.getAddress().isLoopbackAddress()) {
    sb.append(InetAddress.getLocalHost().getCanonicalHostName());
  } else {
    sb.append(address.getHostName());
  }
  sb.append(":").append(address.getPort());
  sb.append("/jobhistory/job/");
  JobID jobId = TypeConverter.fromYarn(appId);
  sb.append(jobId.toString());
  return sb.toString();
}
Example 11: getSocketAddr
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Get the socket address for the <code>name</code> property as an
 * <code>InetSocketAddress</code>. On an HA cluster,
 * this fetches the address corresponding to the RM identified by
 * {@link #RM_HA_ID}.
 * @param name property name.
 * @param defaultAddress the default value
 * @param defaultPort the default port
 * @return InetSocketAddress
 */
@Override
public InetSocketAddress getSocketAddr(
    String name, String defaultAddress, int defaultPort) {
  String address;
  if (HAUtil.isHAEnabled(this) && getServiceAddressConfKeys(this).contains(name)) {
    address = HAUtil.getConfValueForRMInstance(name, defaultAddress, this);
  } else {
    address = get(name, defaultAddress);
  }
  return NetUtils.createSocketAddr(address, defaultPort, name);
}
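As in Example 15 further below, the third argument to createSocketAddr here is the configuration key the address was read from; as far as I can tell it is only used to produce clearer error messages when the value cannot be parsed. A minimal sketch of my own using that pattern (the key and default values are standard YARN names, but their use here is purely illustrative):

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

public class ConfiguredSocketAddrSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical lookup; a value without a port falls back to 8032.
    String address = conf.get("yarn.resourcemanager.address", "rm.example.com");
    InetSocketAddress isa =
        NetUtils.createSocketAddr(address, 8032, "yarn.resourcemanager.address");
    System.out.println(isa);
  }
}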
Example 12: getBestNodeDNAddrPair
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Get the best node from which to stream the data.
 * @param block LocatedBlock, containing nodes in priority order.
 * @param ignoredNodes Do not choose nodes in this array (may be null)
 * @return The DNAddrPair of the best node.
 * @throws IOException
 */
private DNAddrPair getBestNodeDNAddrPair(LocatedBlock block,
    Collection<DatanodeInfo> ignoredNodes) throws IOException {
  DatanodeInfo[] nodes = block.getLocations();
  StorageType[] storageTypes = block.getStorageTypes();
  DatanodeInfo chosenNode = null;
  StorageType storageType = null;
  if (nodes != null) {
    for (int i = 0; i < nodes.length; i++) {
      if (!deadNodes.containsKey(nodes[i])
          && (ignoredNodes == null || !ignoredNodes.contains(nodes[i]))) {
        chosenNode = nodes[i];
        // Storage types are ordered to correspond with nodes, so use the same
        // index to get storage type.
        if (storageTypes != null && i < storageTypes.length) {
          storageType = storageTypes[i];
        }
        break;
      }
    }
  }
  if (chosenNode == null) {
    throw new IOException("No live nodes contain block " + block.getBlock() +
        " after checking nodes = " + Arrays.toString(nodes) +
        ", ignoredNodes = " + ignoredNodes);
  }
  final String dnAddr =
      chosenNode.getXferAddr(dfsClient.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
  return new DNAddrPair(chosenNode, targetAddr, storageType);
}
Example 13: getContainerManagementProtocolProxy
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
protected ContainerManagementProtocol getContainerManagementProtocolProxy(
    final YarnRPC rpc, org.apache.hadoop.yarn.api.records.Token nmToken,
    NodeId nodeId, String user) {
  ContainerManagementProtocol proxy;
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
  final InetSocketAddress addr =
      NetUtils.createSocketAddr(nodeId.getHost(), nodeId.getPort());
  if (nmToken != null) {
    ugi.addToken(ConverterUtils.convertFromYarn(nmToken, addr));
  }
  proxy =
      NMProxy.createNMProxy(conf, ContainerManagementProtocol.class, ugi,
          rpc, addr);
  return proxy;
}
Example 14: testShortCircuitRenewCancel
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Test
public void testShortCircuitRenewCancel()
    throws IOException, InterruptedException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
      InetAddress.getLocalHost().getHostName(), 123, null);
  checkShortCircuitRenewCancel(addr, addr, true);
}
Example 15: getAddress
import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private static InetSocketAddress getAddress(Configuration conf) {
  String addr = conf.get(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT);
  return NetUtils.createSocketAddr(addr,
      DFSConfigKeys.DFS_JOURNALNODE_HTTP_PORT_DEFAULT,
      DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY);
}