

Java NetUtils.connect Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.net.NetUtils.connect. If you are wondering what NetUtils.connect does, how to call it, or want to see it in context, the curated examples below should help. You can also explore other usages of org.apache.hadoop.net.NetUtils.


The sections below present 10 code examples of the NetUtils.connect method, ordered by popularity.
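Before the examples, here is a minimal standalone sketch of the pattern they all share: create an unconnected socket from a factory, connect it with NetUtils.connect and a connect timeout, then set a read timeout. The host, port, and timeout values are placeholders for illustration and are not taken from any of the projects below.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import javax.net.SocketFactory;
import org.apache.hadoop.net.NetUtils;

public class NetUtilsConnectSketch {
  /** Open a socket to host:port using the given timeout (placeholder values). */
  public static Socket open(String host, int port, int timeoutMs) throws IOException {
    Socket sock = SocketFactory.getDefault().createSocket(); // unconnected socket
    InetSocketAddress addr = new InetSocketAddress(host, port);
    NetUtils.connect(sock, addr, timeoutMs); // connect with a connect timeout
    sock.setSoTimeout(timeoutMs);            // read timeout for subsequent I/O
    return sock;
  }
}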

Example 1: connectToDN

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
                                       Configuration conf,
                                       SaslDataTransferClient saslClient,
                                       SocketFactory socketFactory,
                                       boolean connectToDnViaHostname,
                                       DataEncryptionKeyFactory dekFactory,
                                       Token<BlockTokenIdentifier> blockToken)
    throws IOException {

  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    LOG.debug("Connecting to datanode {}", dnAddr);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut,
        unbufIn, dekFactory, blockToken, dn);

    IOStreamPair result = new IOStreamPair(
        new DataInputStream(pair.in),
        new DataOutputStream(new BufferedOutputStream(pair.out,
            NuCypherExtUtilClient.getSmallBufferSize(conf)))
    );

    success = true;
    return result;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines of code: 42, Source file: NuCypherExtUtilClient.java

Example 2: createSocketForPipeline

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Create a socket for a write pipeline
 * @param first the first datanode 
 * @param length the pipeline length
 * @param client client
 * @return the socket connected to the first datanode
 */
static Socket createSocketForPipeline(final DatanodeInfo first,
    final int length, final DFSClient client) throws IOException {
  final String dnAddr = first.getXferAddr(
      client.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
  final Socket sock = client.socketFactory.createSocket();
  final int timeout = client.getDatanodeReadTimeout(length);
  NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), client.getConf().socketTimeout);
  sock.setSoTimeout(timeout);
  sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  if(DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
  }
  return sock;
}
 
Developer: naver, Project: hadoop, Lines of code: 26, Source file: DFSOutputStream.java

Example 3: connectToDN

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 30, Source file: DFSClient.java

Example 4: newConnectedPeer

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    NetUtils.connect(sock, addr,
      getRandomLocalInterfaceAddr(),
      dfsClientConf.socketTimeout);
    peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
        blockToken, datanodeId);
    peer.setReadTimeout(dfsClientConf.socketTimeout);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(LOG, peer);
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 25, Source file: DFSClient.java

Example 5: setupConnection

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
protected synchronized void setupConnection() throws IOException {
  short ioFailures = 0;
  short timeoutFailures = 0;
  while (true) {
    try {
      this.socket = socketFactory.createSocket();
      this.socket.setTcpNoDelay(tcpNoDelay);
      this.socket.setKeepAlive(tcpKeepAlive);
      if (localAddr != null) {
        this.socket.bind(localAddr);
      }
      NetUtils.connect(this.socket, remoteId.getAddress(), connectTO);
      this.socket.setSoTimeout(readTO);
      return;
    } catch (SocketTimeoutException toe) {
      /* The max number of retries is 45,
       * which amounts to 20s*45 = 15 minutes retries.
       */
      handleConnectionFailure(timeoutFailures++, maxRetries, toe);
    } catch (IOException ie) {
      handleConnectionFailure(ioFailures++, maxRetries, ie);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 25, Source file: RpcClientImpl.java

Example 6: doIpcVersionTest

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private void doIpcVersionTest(
    byte[] requestData,
    byte[] expectedResponse) throws IOException {
  Server server = new TestServer(1, true);
  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  server.start();
  Socket socket = new Socket();

  try {
    NetUtils.connect(socket, addr, 5000);
    
    OutputStream out = socket.getOutputStream();
    InputStream in = socket.getInputStream();
    out.write(requestData, 0, requestData.length);
    out.flush();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    IOUtils.copyBytes(in, baos, 256);
    
    byte[] responseData = baos.toByteArray();
    
    assertEquals(
        StringUtils.byteToHexString(expectedResponse),
        StringUtils.byteToHexString(responseData));
  } finally {
    IOUtils.closeSocket(socket);
    server.stop();
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines of code: 29, Source file: TestIPC.java

Example 7: setupConnection

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private synchronized void setupConnection() throws IOException {
  short ioFailures = 0;
  short timeoutFailures = 0;
  while (true) {
    try {
      this.socket = socketFactory.createSocket();
      this.socket.setTcpNoDelay(tcpNoDelay);
      this.socket.setKeepAlive(true);
      
      if (tcpLowLatency) {
        /*
         * This allows intermediate switches to shape IPC traffic
         * differently from Shuffle/HDFS DataStreamer traffic.
         *
         * IPTOS_RELIABILITY (0x04) | IPTOS_LOWDELAY (0x10)
         *
         * Prefer to optimize connect() speed & response latency over net
         * throughput.
         */
        this.socket.setTrafficClass(0x04 | 0x10);
        this.socket.setPerformancePreferences(1, 2, 0);
      }

      /*
       * Bind the socket to the host specified in the principal name of the
       * client, to ensure Server matching address of the client connection
       * to host name in principal passed.
       */
      UserGroupInformation ticket = remoteId.getTicket();
      if (ticket != null && ticket.hasKerberosCredentials()) {
        KerberosInfo krbInfo = 
          remoteId.getProtocol().getAnnotation(KerberosInfo.class);
        if (krbInfo != null && krbInfo.clientPrincipal() != null) {
          String host = 
            SecurityUtil.getHostFromPrincipal(remoteId.getTicket().getUserName());
          
          // If host name is a valid local address then bind socket to it
          InetAddress localAddr = NetUtils.getLocalInetAddress(host);
          if (localAddr != null) {
            this.socket.setReuseAddress(true);
            this.socket.bind(new InetSocketAddress(localAddr, 0));
          }
        }
      }
      
      NetUtils.connect(this.socket, server, connectionTimeout);
      if (rpcTimeout > 0) {
        pingInterval = rpcTimeout;  // rpcTimeout overwrites pingInterval
      }
      this.socket.setSoTimeout(pingInterval);
      return;
    } catch (ConnectTimeoutException toe) {
      /* Check for an address change and update the local reference.
       * Reset the failure counter if the address was changed
       */
      if (updateAddress()) {
        timeoutFailures = ioFailures = 0;
      }
      handleConnectionTimeout(timeoutFailures++,
          maxRetriesOnSocketTimeouts, toe);
    } catch (IOException ie) {
      if (updateAddress()) {
        timeoutFailures = ioFailures = 0;
      }
      handleConnectionFailure(ioFailures++, ie);
    }
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines of code: 69, Source file: Client.java

Example 8: testMaxConnections

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Test
public void testMaxConnections() throws Exception {
  conf.setInt("ipc.server.max.connections", 5);
  Server server = null;
  Thread[] connectors = new Thread[10];

  try {
    server = new TestServer(3, false);
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    assertEquals(0, server.getNumOpenConnections());

    for (int i = 0; i < 10; i++) {
      connectors[i] = new Thread() {
        @Override
        public void run() {
          Socket sock = null;
          try {
            sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            NetUtils.connect(sock, addr, 3000);
            try {
              Thread.sleep(4000);
            } catch (InterruptedException ie) { }
          } catch (IOException ioe) {
          } finally {
            if (sock != null) {
              try {
                sock.close();
              } catch (IOException ioe) { }
            }
          }
        }
      };
      connectors[i].start();
    }

    Thread.sleep(1000);
    // server should only accept up to 5 connections
    assertEquals(5, server.getNumOpenConnections());

    for (int i = 0; i < 10; i++) {
      connectors[i].join();
    }
  } finally {
    if (server != null) {
      server.stop();
    }
    conf.setInt("ipc.server.max.connections", 0);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines of code: 51, Source file: TestIPC.java

Example 9: setupConnection

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private synchronized void setupConnection() throws IOException {
	short ioFailures = 0;
	short timeoutFailures = 0;
	while (true) {
		try {
			this.socket = socketFactory.createSocket();
			this.socket.setTcpNoDelay(tcpNoDelay);
			/*
			 * Bind the socket to the host specified in the principal
			 * name of the client, to ensure Server matching address of
			 * the client connection to host name in principal passed.
			 */
			/*
			 * if (UserGroupInformation.isSecurityEnabled()) {
			 *     KerberosInfo krbInfo =
			 *         remoteId.getProtocol().getAnnotation(KerberosInfo.class);
			 *     if (krbInfo != null && krbInfo.clientPrincipal() != null) {
			 *         String host =
			 *             SecurityUtil.getHostFromPrincipal(remoteId.getTicket().getUserName());
			 *
			 *         // If host name is a valid local address then bind socket to it
			 *         InetAddress localAddr = NetUtils.getLocalInetAddress(host);
			 *         if (localAddr != null) {
			 *             this.socket.bind(new InetSocketAddress(localAddr, 0));
			 *         }
			 *     }
			 * }
			 */
			// connection time out is 20s

			this.socket.bind(new InetSocketAddress(conf.get("client.ip.name"), 0));
			NetUtils.connect(this.socket, server, 20000);
			if (rpcTimeout > 0) {
				pingInterval = rpcTimeout; // rpcTimeout overwrites pingInterval
			}

			this.socket.setSoTimeout(pingInterval);
			return;
		} catch (SocketTimeoutException toe) {
			/*
			 * Check for an address change and update the local
			 * reference. Reset the failure counter if the address was
			 * changed
			 */
			if (updateAddress()) {
				timeoutFailures = ioFailures = 0;
			}
			/*
			 * The max number of retries is 45, which amounts to 20s*45
			 * = 15 minutes retries.
			 */
			handleConnectionFailure(timeoutFailures++, 45, toe);
		} catch (IOException ie) {
			if (updateAddress()) {
				timeoutFailures = ioFailures = 0;
			}
			handleConnectionFailure(ioFailures++, maxRetries, ie);
		}
	}
}
 
Developer: spafka, Project: spark_deep, Lines of code: 61, Source file: Client.java

Example 10: setupConnection

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private synchronized void setupConnection() throws IOException {
  short ioFailures = 0;
  short timeoutFailures = 0;
  while (true) {
    try {
      this.socket = socketFactory.createSocket();
      this.socket.setTcpNoDelay(tcpNoDelay);
      this.socket.setKeepAlive(true);
      
      /*
       * Bind the socket to the host specified in the principal name of the
       * client, to ensure Server matching address of the client connection
       * to host name in principal passed.
       */
      UserGroupInformation ticket = remoteId.getTicket();
      if (ticket != null && ticket.hasKerberosCredentials()) {
        KerberosInfo krbInfo = 
          remoteId.getProtocol().getAnnotation(KerberosInfo.class);
        if (krbInfo != null && krbInfo.clientPrincipal() != null) {
          String host = 
            SecurityUtil.getHostFromPrincipal(remoteId.getTicket().getUserName());
          
          // If host name is a valid local address then bind socket to it
          InetAddress localAddr = NetUtils.getLocalInetAddress(host);
          if (localAddr != null) {
            this.socket.bind(new InetSocketAddress(localAddr, 0));
          }
        }
      }
      
      NetUtils.connect(this.socket, server, connectionTimeout);
      if (rpcTimeout > 0) {
        pingInterval = rpcTimeout;  // rpcTimeout overwrites pingInterval
      }
      this.socket.setSoTimeout(pingInterval);
      return;
    } catch (ConnectTimeoutException toe) {
      /* Check for an address change and update the local reference.
       * Reset the failure counter if the address was changed
       */
      if (updateAddress()) {
        timeoutFailures = ioFailures = 0;
      }
      handleConnectionTimeout(timeoutFailures++,
          maxRetriesOnSocketTimeouts, toe);
    } catch (IOException ie) {
      if (updateAddress()) {
        timeoutFailures = ioFailures = 0;
      }
      handleConnectionFailure(ioFailures++, ie);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 54, Source file: Client.java


Note: The org.apache.hadoop.net.NetUtils.connect examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, who retain copyright of the source code. For redistribution and use, please refer to the license of the corresponding project; do not reproduce this article without permission.