Java NetUtils.connect Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.net.NetUtils.connect. If you are wondering what NetUtils.connect does, how it is called, or what it looks like in real code, the curated examples below may help. You can also explore further usage of org.apache.hadoop.net.NetUtils, the class this method belongs to.


Ten code examples of NetUtils.connect are shown below, sorted by popularity by default.
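Before the individual examples, here is a minimal sketch of the pattern most of them share: create a socket through Hadoop's configured socket factory, connect it with NetUtils.connect and an explicit connect timeout, set a separate read timeout, and close the socket if setup fails. This is distilled from the examples below rather than taken from any one project; the address 127.0.0.1:8020, the 5000 ms timeouts, and the class name are placeholder values.

import java.net.InetSocketAddress;
import java.net.Socket;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;

public class NetUtilsConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder target; replace with a real host:port.
    InetSocketAddress addr = NetUtils.createSocketAddr("127.0.0.1:8020");

    Socket sock = null;
    boolean success = false;
    try {
      // Create the socket through the configured SocketFactory, as the
      // examples below do, rather than with `new Socket()`.
      sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
      // Connect with a connect timeout in milliseconds. An overload that
      // additionally takes a local bind address appears in Examples 2 and 4.
      NetUtils.connect(sock, addr, 5000);
      // A separate read timeout is normally set right after connecting.
      sock.setSoTimeout(5000);
      success = true;
    } finally {
      if (!success) {
        IOUtils.closeSocket(sock);
      }
    }
  }
}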

Example 1: connectToDN

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
                                       Configuration conf,
                                       SaslDataTransferClient saslClient,
                                       SocketFactory socketFactory,
                                       boolean connectToDnViaHostname,
                                       DataEncryptionKeyFactory dekFactory,
                                       Token<BlockTokenIdentifier> blockToken)
    throws IOException {

  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    LOG.debug("Connecting to datanode {}", dnAddr);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut,
        unbufIn, dekFactory, blockToken, dn);

    IOStreamPair result = new IOStreamPair(
        new DataInputStream(pair.in),
        new DataOutputStream(new BufferedOutputStream(pair.out,
            NuCypherExtUtilClient.getSmallBufferSize(conf)))
    );

    success = true;
    return result;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 42, Source: NuCypherExtUtilClient.java

Example 2: createSocketForPipeline

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Create a socket for a write pipeline
 * @param first the first datanode 
 * @param length the pipeline length
 * @param client client
 * @return the socket connected to the first datanode
 */
static Socket createSocketForPipeline(final DatanodeInfo first,
    final int length, final DFSClient client) throws IOException {
  final String dnAddr = first.getXferAddr(
      client.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
  final Socket sock = client.socketFactory.createSocket();
  final int timeout = client.getDatanodeReadTimeout(length);
  NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), client.getConf().socketTimeout);
  sock.setSoTimeout(timeout);
  sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  if(DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
  }
  return sock;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: DFSOutputStream.java

Example 3: connectToDN

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source: DFSClient.java

Example 4: newConnectedPeer

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    NetUtils.connect(sock, addr,
      getRandomLocalInterfaceAddr(),
      dfsClientConf.socketTimeout);
    peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
        blockToken, datanodeId);
    peer.setReadTimeout(dfsClientConf.socketTimeout);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(LOG, peer);
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 25, Source: DFSClient.java

Example 5: setupConnection

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
protected synchronized void setupConnection() throws IOException {
  short ioFailures = 0;
  short timeoutFailures = 0;
  while (true) {
    try {
      this.socket = socketFactory.createSocket();
      this.socket.setTcpNoDelay(tcpNoDelay);
      this.socket.setKeepAlive(tcpKeepAlive);
      if (localAddr != null) {
        this.socket.bind(localAddr);
      }
      NetUtils.connect(this.socket, remoteId.getAddress(), connectTO);
      this.socket.setSoTimeout(readTO);
      return;
    } catch (SocketTimeoutException toe) {
      /* The max number of retries is 45,
       * which amounts to 20s*45 = 15 minutes retries.
       */
      handleConnectionFailure(timeoutFailures++, maxRetries, toe);
    } catch (IOException ie) {
      handleConnectionFailure(ioFailures++, maxRetries, ie);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 25, Source: RpcClientImpl.java

Example 6: doIpcVersionTest

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private void doIpcVersionTest(
    byte[] requestData,
    byte[] expectedResponse) throws IOException {
  Server server = new TestServer(1, true);
  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  server.start();
  Socket socket = new Socket();

  try {
    NetUtils.connect(socket, addr, 5000);
    
    OutputStream out = socket.getOutputStream();
    InputStream in = socket.getInputStream();
    out.write(requestData, 0, requestData.length);
    out.flush();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    IOUtils.copyBytes(in, baos, 256);
    
    byte[] responseData = baos.toByteArray();
    
    assertEquals(
        StringUtils.byteToHexString(expectedResponse),
        StringUtils.byteToHexString(responseData));
  } finally {
    IOUtils.closeSocket(socket);
    server.stop();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 29, Source: TestIPC.java

Example 7: setupConnection

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private synchronized void setupConnection() throws IOException {
  short ioFailures = 0;
  short timeoutFailures = 0;
  while (true) {
    try {
      this.socket = socketFactory.createSocket();
      this.socket.setTcpNoDelay(tcpNoDelay);
      this.socket.setKeepAlive(true);
      
      if (tcpLowLatency) {
        /*
         * This allows intermediate switches to shape IPC traffic
         * differently from Shuffle/HDFS DataStreamer traffic.
         *
         * IPTOS_RELIABILITY (0x04) | IPTOS_LOWDELAY (0x10)
         *
         * Prefer to optimize connect() speed & response latency over net
         * throughput.
         */
        this.socket.setTrafficClass(0x04 | 0x10);
        this.socket.setPerformancePreferences(1, 2, 0);
      }

      /*
       * Bind the socket to the host specified in the principal name of the
       * client, to ensure Server matching address of the client connection
       * to host name in principal passed.
       */
      UserGroupInformation ticket = remoteId.getTicket();
      if (ticket != null && ticket.hasKerberosCredentials()) {
        KerberosInfo krbInfo = 
          remoteId.getProtocol().getAnnotation(KerberosInfo.class);
        if (krbInfo != null && krbInfo.clientPrincipal() != null) {
          String host = 
            SecurityUtil.getHostFromPrincipal(remoteId.getTicket().getUserName());
          
          // If host name is a valid local address then bind socket to it
          InetAddress localAddr = NetUtils.getLocalInetAddress(host);
          if (localAddr != null) {
            this.socket.setReuseAddress(true);
            this.socket.bind(new InetSocketAddress(localAddr, 0));
          }
        }
      }
      
      NetUtils.connect(this.socket, server, connectionTimeout);
      if (rpcTimeout > 0) {
        pingInterval = rpcTimeout;  // rpcTimeout overwrites pingInterval
      }
      this.socket.setSoTimeout(pingInterval);
      return;
    } catch (ConnectTimeoutException toe) {
      /* Check for an address change and update the local reference.
       * Reset the failure counter if the address was changed
       */
      if (updateAddress()) {
        timeoutFailures = ioFailures = 0;
      }
      handleConnectionTimeout(timeoutFailures++,
          maxRetriesOnSocketTimeouts, toe);
    } catch (IOException ie) {
      if (updateAddress()) {
        timeoutFailures = ioFailures = 0;
      }
      handleConnectionFailure(ioFailures++, ie);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 69, Source: Client.java

Example 8: testMaxConnections

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
@Test
public void testMaxConnections() throws Exception {
  conf.setInt("ipc.server.max.connections", 5);
  Server server = null;
  Thread connectors[] = new Thread[10];

  try {
    server = new TestServer(3, false);
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    assertEquals(0, server.getNumOpenConnections());

    for (int i = 0; i < 10; i++) {
      connectors[i] = new Thread() {
        @Override
        public void run() {
          Socket sock = null;
          try {
            sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            NetUtils.connect(sock, addr, 3000);
            try {
              Thread.sleep(4000);
            } catch (InterruptedException ie) { }
          } catch (IOException ioe) {
          } finally {
            if (sock != null) {
              try {
                sock.close();
              } catch (IOException ioe) { }
            }
          }
        }
      };
      connectors[i].start();
    }

    Thread.sleep(1000);
    // server should only accept up to 5 connections
    assertEquals(5, server.getNumOpenConnections());

    for (int i = 0; i < 10; i++) {
      connectors[i].join();
    }
  } finally {
    if (server != null) {
      server.stop();
    }
    conf.setInt("ipc.server.max.connections", 0);
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 51, Source: TestIPC.java

Example 9: setupConnection

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private synchronized void setupConnection() throws IOException {
	short ioFailures = 0;
	short timeoutFailures = 0;
	while (true) {
		try {
			this.socket = socketFactory.createSocket();
			this.socket.setTcpNoDelay(tcpNoDelay);
			/*
			 * Bind the socket to the host specified in the principal
			 * name of the client, to ensure Server matching address of
			 * the client connection to host name in principal passed.
			 */
			/*
			 * if (UserGroupInformation.isSecurityEnabled()) {
			 * KerberosInfo krbInfo =
			 * remoteId.getProtocol().getAnnotation(KerberosInfo.class);
			 * if (krbInfo != null && krbInfo.clientPrincipal() != null)
			 * { String host =
			 * SecurityUtil.getHostFromPrincipal(remoteId.getTicket().
			 * getUserName());
			 *
			 * // If host name is a valid local address then bind socket
			 * to it InetAddress localAddr =
			 * NetUtils.getLocalInetAddress(host); if (localAddr !=
			 * null) { this.socket.bind(new InetSocketAddress(localAddr,
			 * 0)); } } }
			 */
			// connection time out is 20s

			this.socket.bind(new InetSocketAddress(conf.get("client.ip.name"), 0));
			NetUtils.connect(this.socket, server, 20000);
			if (rpcTimeout > 0) {
				pingInterval = rpcTimeout; // rpcTimeout overwrites
											// pingInterval
			}

			this.socket.setSoTimeout(pingInterval);
			return;
		} catch (SocketTimeoutException toe) {
			/*
			 * Check for an address change and update the local
			 * reference. Reset the failure counter if the address was
			 * changed
			 */
			if (updateAddress()) {
				timeoutFailures = ioFailures = 0;
			}
			/*
			 * The max number of retries is 45, which amounts to 20s*45
			 * = 15 minutes retries.
			 */
			handleConnectionFailure(timeoutFailures++, 45, toe);
		} catch (IOException ie) {
			if (updateAddress()) {
				timeoutFailures = ioFailures = 0;
			}
			handleConnectionFailure(ioFailures++, maxRetries, ie);
		}
	}
}
 
Developer ID: spafka, Project: spark_deep, Lines of code: 61, Source: Client.java

Example 10: setupConnection

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
private synchronized void setupConnection() throws IOException {
  short ioFailures = 0;
  short timeoutFailures = 0;
  while (true) {
    try {
      this.socket = socketFactory.createSocket();
      this.socket.setTcpNoDelay(tcpNoDelay);
      this.socket.setKeepAlive(true);
      
      /*
       * Bind the socket to the host specified in the principal name of the
       * client, to ensure Server matching address of the client connection
       * to host name in principal passed.
       */
      UserGroupInformation ticket = remoteId.getTicket();
      if (ticket != null && ticket.hasKerberosCredentials()) {
        KerberosInfo krbInfo = 
          remoteId.getProtocol().getAnnotation(KerberosInfo.class);
        if (krbInfo != null && krbInfo.clientPrincipal() != null) {
          String host = 
            SecurityUtil.getHostFromPrincipal(remoteId.getTicket().getUserName());
          
          // If host name is a valid local address then bind socket to it
          InetAddress localAddr = NetUtils.getLocalInetAddress(host);
          if (localAddr != null) {
            this.socket.bind(new InetSocketAddress(localAddr, 0));
          }
        }
      }
      
      NetUtils.connect(this.socket, server, connectionTimeout);
      if (rpcTimeout > 0) {
        pingInterval = rpcTimeout;  // rpcTimeout overwrites pingInterval
      }
      this.socket.setSoTimeout(pingInterval);
      return;
    } catch (ConnectTimeoutException toe) {
      /* Check for an address change and update the local reference.
       * Reset the failure counter if the address was changed
       */
      if (updateAddress()) {
        timeoutFailures = ioFailures = 0;
      }
      handleConnectionTimeout(timeoutFailures++,
          maxRetriesOnSocketTimeouts, toe);
    } catch (IOException ie) {
      if (updateAddress()) {
        timeoutFailures = ioFailures = 0;
      }
      handleConnectionFailure(ioFailures++, ie);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 54, Source: Client.java


Note: The org.apache.hadoop.net.NetUtils.connect method examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.