

Java NetUtils Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.net.NetUtils. If you are looking for concrete answers to questions such as what NetUtils does and how to use it, the curated class examples below may help.


The NetUtils class belongs to the org.apache.hadoop.net package. Fifteen NetUtils code examples are shown below, sorted by popularity.
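Before diving into the examples, here is a minimal, self-contained sketch of the NetUtils calls that recur below. It only parses and formats addresses, so it runs without a Hadoop cluster; the host name and port are placeholders chosen for illustration.

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

public class NetUtilsBasics {
  public static void main(String[] args) {
    // Parse a "host:port" authority into an InetSocketAddress.
    InetSocketAddress addr = NetUtils.createSocketAddr("example.com:8020");

    // Build an address from separate host and port values.
    InetSocketAddress byHost = NetUtils.createSocketAddrForHost("example.com", 8020);

    // Format an address back into a "host:port" string.
    System.out.println(NetUtils.getHostPortString(addr));
    System.out.println(NetUtils.getHostPortString(byHost));
  }
}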

Example 1: connectToDN

import org.apache.hadoop.net.NetUtils; // import the required package/class
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
                                       Configuration conf,
                                       SaslDataTransferClient saslClient,
                                       SocketFactory socketFactory,
                                       boolean connectToDnViaHostname,
                                       DataEncryptionKeyFactory dekFactory,
                                       Token<BlockTokenIdentifier> blockToken)
    throws IOException {

  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    LOG.debug("Connecting to datanode {}", dnAddr);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut,
        unbufIn, dekFactory, blockToken, dn);

    IOStreamPair result = new IOStreamPair(
        new DataInputStream(pair.in),
        new DataOutputStream(new BufferedOutputStream(pair.out,
            NuCypherExtUtilClient.getSmallBufferSize(conf)))
    );

    success = true;
    return result;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 42 | Source: NuCypherExtUtilClient.java
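A design note on this example: the success flag plus the finally block guarantees that if anything between createSocket and the return throws (connect timeout, SASL handshake failure), the half-open socket is closed rather than leaked. The same idiom appears again in Examples 9 and 11.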

Example 2: verifyServiceAddr

import org.apache.hadoop.net.NetUtils; // import the required package/class
private void verifyServiceAddr(String host, String ip) {
  InetSocketAddress addr;
  int port = 123;

  // test host, port tuple
  //LOG.info("test tuple ("+host+","+port+")");
  addr = NetUtils.createSocketAddrForHost(host, port);
  verifyAddress(addr, host, ip, port);

  // test authority with no default port
  //LOG.info("test authority '"+host+":"+port+"'");
  addr = NetUtils.createSocketAddr(host+":"+port);
  verifyAddress(addr, host, ip, port);

  // test authority with a default port, make sure default isn't used
  //LOG.info("test authority '"+host+":"+port+"' with ignored default port");
  addr = NetUtils.createSocketAddr(host+":"+port, port+1);
  verifyAddress(addr, host, ip, port);

  // test host-only authority, using port as default port
  //LOG.info("test host:"+host+" port:"+port);
  addr = NetUtils.createSocketAddr(host, port);
  verifyAddress(addr, host, ip, port);
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 25 | Source: TestSecurityUtil.java
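The defaulting rules this test verifies can be seen directly in a short sketch. Assuming placeholder host names that need not resolve, the snippet below shows that an explicit port in the authority wins over the supplied default, while a host-only target falls back to it.

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

public class AddrParsingSketch {
  public static void main(String[] args) {
    // Explicit port in the authority: the default (456) is ignored.
    InetSocketAddress a = NetUtils.createSocketAddr("node1:123", 456);
    System.out.println(a.getPort()); // 123

    // Host-only target: the default port is used.
    InetSocketAddress b = NetUtils.createSocketAddr("node1", 456);
    System.out.println(b.getPort()); // 456
  }
}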

Example 3: getDataNodeProxy

import org.apache.hadoop.net.NetUtils; // import the required package/class
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For datanode proxy the server principal should be DN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
 
Developer: naver | Project: hadoop | Lines: 17 | Source: DFSAdmin.java

Example 4: getSocketAddr

import org.apache.hadoop.net.NetUtils; // import the required package/class
/**
 * Get the socket address for <code>hostProperty</code> as a
 * <code>InetSocketAddress</code>. If <code>hostProperty</code> is
 * <code>null</code>, <code>addressProperty</code> will be used. This
 * is useful for cases where we want to differentiate between host
 * bind address and address clients should use to establish connection.
 *
 * @param hostProperty bind host property name.
 * @param addressProperty address property name.
 * @param defaultAddressValue the default value
 * @param defaultPort the default port
 * @return InetSocketAddress
 */
public InetSocketAddress getSocketAddr(
    String hostProperty,
    String addressProperty,
    String defaultAddressValue,
    int defaultPort) {

  InetSocketAddress bindAddr = getSocketAddr(
    addressProperty, defaultAddressValue, defaultPort);

  final String host = get(hostProperty);

  if (host == null || host.isEmpty()) {
    return bindAddr;
  }

  return NetUtils.createSocketAddr(
      host, bindAddr.getPort(), hostProperty);
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 32 | Source: Configuration.java
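To make the host/address split concrete, here is a hedged usage sketch. The property names my.service.bind-host and my.service.address are hypothetical, chosen for illustration only; the four-argument getSocketAddr overload is the one defined in the example above.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;

public class BindHostSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("my.service.address", "client-host:8020"); // what clients dial
    conf.set("my.service.bind-host", "0.0.0.0");        // what the server binds

    // Host is taken from the bind-host property, port from the address property.
    InetSocketAddress bindAddr = conf.getSocketAddr(
        "my.service.bind-host", "my.service.address", "0.0.0.0:8020", 8020);
    System.out.println(bindAddr.getHostName() + ":" + bindAddr.getPort()); // 0.0.0.0:8020
  }
}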

Example 5: updateConnectAddr

import org.apache.hadoop.net.NetUtils; // import the required package/class
/**
 * Set the socket address a client can use to connect for the
 * <code>name</code> property as a <code>host:port</code>.  The wildcard
 * address is replaced with the local host's address. If both the host and
 * address properties are configured, the host component of the address property
 * is combined with the port component of addr to generate the address. This allows
 * optional control over which host name is used in multi-home bind-host
 * cases where a host can have multiple names
 * @param hostProperty the bind-host configuration name
 * @param addressProperty the service address configuration name
 * @param defaultAddressValue the service default address configuration value
 * @param addr InetSocketAddress of the service listener
 * @return InetSocketAddress for clients to connect
 */
public InetSocketAddress updateConnectAddr(
    String hostProperty,
    String addressProperty,
    String defaultAddressValue,
    InetSocketAddress addr) {

  final String host = get(hostProperty);
  final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue);

  if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) {
    //not our case, fall back to original logic
    return updateConnectAddr(addressProperty, addr);
  }

  final String connectHost = connectHostPort.split(":")[0];
  // Create connect address using client address hostname and server port.
  return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost(
      connectHost, addr.getPort()));
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 34 | Source: Configuration.java
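A matching sketch for the connect side, under the same hypothetical property names as above: the server has bound a wildcard listener, and the address property supplies the host that clients should actually dial (localhost here, so the sketch resolves everywhere).

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;

public class ConnectAddrSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("my.service.bind-host", "0.0.0.0");
    conf.set("my.service.address", "localhost:0"); // port 0 = chosen at bind time

    // The listener ended up on a concrete port; publish the client-facing
    // host from the address property combined with that real port.
    InetSocketAddress listener = new InetSocketAddress("0.0.0.0", 8032);
    conf.updateConnectAddr(
        "my.service.bind-host", "my.service.address", "localhost:0", listener);
    System.out.println(conf.get("my.service.address")); // localhost:8032
  }
}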

Example 6: getRmClient

import org.apache.hadoop.net.NetUtils; // import the required package/class
private static ApplicationClientProtocol getRmClient(Token<?> token,
    Configuration conf) throws IOException {
  String[] services = token.getService().toString().split(",");
  for (String service : services) {
    InetSocketAddress addr = NetUtils.createSocketAddr(service);
    if (localSecretManager != null) {
      // return null if it's our token
      if (localServiceAddress.getAddress().isAnyLocalAddress()) {
        if (NetUtils.isLocalAddress(addr.getAddress()) &&
            addr.getPort() == localServiceAddress.getPort()) {
          return null;
        }
      } else if (addr.equals(localServiceAddress)) {
        return null;
      }
    }
  }
  return ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
}
 
Developer: naver | Project: hadoop | Lines: 20 | Source: RMDelegationTokenIdentifier.java

Example 7: testVersion0ClientVersion1Server

import org.apache.hadoop.net.NetUtils; // import the required package/class
@Test  // old client vs new server
public void testVersion0ClientVersion1Server() throws Exception {
  // create a server with two handlers
  TestImpl1 impl = new TestImpl1();
  server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);

  proxy = RPC.getProtocolProxy(
      TestProtocol0.class, TestProtocol0.versionID, addr, conf);

  TestProtocol0 proxy0 = (TestProtocol0)proxy.getProxy();
  proxy0.ping();
}
 
Developer: naver | Project: hadoop | Lines: 18 | Source: TestRPCCompatibility.java

Example 8: testHttpsCookie

import org.apache.hadoop.net.NetUtils; // import the required package/class
@Test
public void testHttpsCookie() throws IOException, GeneralSecurityException {
  URL base = new URL("https://" + NetUtils.getHostPortString(server
          .getConnectorAddress(1)));
  HttpsURLConnection conn = (HttpsURLConnection) new URL(base,
          "/echo").openConnection();
  conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());

  String header = conn.getHeaderField("Set-Cookie");
  List<HttpCookie> cookies = HttpCookie.parse(header);
  Assert.assertTrue(!cookies.isEmpty());
  Assert.assertTrue(header.contains("; HttpOnly"));
  Assert.assertTrue(cookies.get(0).getSecure());
  Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
 
Developer: naver | Project: hadoop | Lines: 16 | Source: TestHttpCookieFlag.java

Example 9: connectToDN

import org.apache.hadoop.net.NetUtils; // import the required package/class
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 30 | Source: DFSClient.java

Example 10: testPersistentCookie

import org.apache.hadoop.net.NetUtils; // import the required package/class
@Test
public void testPersistentCookie() throws IOException {
  try {
      startServer(false);
  } catch (Exception e) {
      // Auto-generated catch block
      e.printStackTrace();
  }

  URL base = new URL("http://" + NetUtils.getHostPortString(server
          .getConnectorAddress(0)));
  HttpURLConnection conn = (HttpURLConnection) new URL(base,
          "/echo").openConnection();

  String header = conn.getHeaderField("Set-Cookie");
  List<HttpCookie> cookies = HttpCookie.parse(header);
  Assert.assertTrue(!cookies.isEmpty());
  Log.info(header);
  Assert.assertTrue(header.contains("; Expires="));
  Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 22 | Source: TestAuthenticationSessionCookie.java

Example 11: newConnectedPeer

import org.apache.hadoop.net.NetUtils; // import the required package/class
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    NetUtils.connect(sock, addr,
      getRandomLocalInterfaceAddr(),
      dfsClientConf.socketTimeout);
    peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
        blockToken, datanodeId);
    peer.setReadTimeout(dfsClientConf.socketTimeout);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(LOG, peer);
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 25 | Source: DFSClient.java

Example 12: transferRbw

import org.apache.hadoop.net.NetUtils; // import the required package/class
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Developer: naver | Project: hadoop | Lines: 21 | Source: DFSTestUtil.java

Example 13: testInterDNProtocolTimeout

import org.apache.hadoop.net.NetUtils; // import the required package/class
/** Test to verify that InterDatanode RPC times out as expected when
 *  the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    fail ("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
 
Developer: naver | Project: hadoop | Lines: 27 | Source: TestInterDatanodeProtocol.java

Example 14: testIpcConnectTimeout

import org.apache.hadoop.net.NetUtils; // import the required package/class
@Test(timeout=60000)
public void testIpcConnectTimeout() throws IOException {
  // create the server
  Server server = new TestServer(1, true);
  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  //Intentionally do not start server to get a connection timeout

  // start client
  Client.setConnectTimeout(conf, 100);
  Client client = new Client(LongWritable.class, conf);
  // set the rpc timeout to twice the MIN_SLEEP_TIME
  try {
    call(client, new LongWritable(RANDOM.nextLong()), addr,
        MIN_SLEEP_TIME * 2, conf);
    fail("Expected an exception to have been thrown");
  } catch (SocketTimeoutException e) {
    LOG.info("Get a SocketTimeoutException ", e);
  }
  client.stop();
}
 
Developer: nucypher | Project: hadoop-oss | Lines: 21 | Source: TestIPC.java

Example 15: substituteForWildcardAddress

import org.apache.hadoop.net.NetUtils; // import the required package/class
/**
 * Substitute a default host in the case that an address has been configured
 * with a wildcard. This is used, for example, when determining the HTTP
 * address of the NN -- if it's configured to bind to 0.0.0.0, we want to
 * substitute the hostname from the filesystem URI rather than trying to
 * connect to 0.0.0.0.
 * @param configuredAddress the address found in the configuration
 * @param defaultHost the host to substitute with, if configuredAddress
 * is a local/wildcard address.
 * @return the substituted address
 * @throws IOException if it is a wildcard address and security is enabled
 */
@VisibleForTesting
static String substituteForWildcardAddress(String configuredAddress,
  String defaultHost) throws IOException {
  InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
  InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
      + ":0");
  final InetAddress addr = sockAddr.getAddress();
  if (addr != null && addr.isAnyLocalAddress()) {
    if (UserGroupInformation.isSecurityEnabled() &&
        defaultSockAddr.getAddress().isAnyLocalAddress()) {
      throw new IOException("Cannot use a wildcard address with security. " +
          "Must explicitly set bind address for Kerberos");
    }
    return defaultHost + ":" + sockAddr.getPort();
  } else {
    return configuredAddress;
  }
}
 
Developer: naver | Project: hadoop | Lines: 31 | Source: DFSUtil.java
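The wildcard check at the heart of this method is plain java.net logic. A minimal sketch, assuming a placeholder default host nn.example.com for the substitution:

import java.net.InetAddress;
import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

public class WildcardSketch {
  public static void main(String[] args) {
    String configuredAddress = "0.0.0.0:50070";
    String defaultHost = "nn.example.com"; // placeholder substitute host

    InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
    InetAddress a = sockAddr.getAddress();
    String result = (a != null && a.isAnyLocalAddress())
        ? defaultHost + ":" + sockAddr.getPort() // wildcard: substitute the host
        : configuredAddress;                     // concrete: keep as configured
    System.out.println(result); // nn.example.com:50070
  }
}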


Note: The org.apache.hadoop.net.NetUtils class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects; copyright remains with the original authors. Consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.