当前位置: 首页>>代码示例>>Java>>正文


Java IOUtils.closeSocket方法代码示例

本文整理汇总了Java中org.apache.hadoop.io.IOUtils.closeSocket方法的典型用法代码示例。如果您正苦于以下问题:Java IOUtils.closeSocket方法的具体用法?Java IOUtils.closeSocket怎么用?Java IOUtils.closeSocket使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.io.IOUtils的用法示例。


在下文中一共展示了IOUtils.closeSocket方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: connectToDN

import org.apache.hadoop.io.IOUtils; //导入方法依赖的package包/类
/**
 * Connect to the given datanode's data transfer port, and return the
 * resulting IOStreamPair. This includes SASL negotiation / encryption
 * wrapping of the raw socket streams, etc.
 *
 * @param dn datanode to connect to
 * @param timeout socket connect timeout and read (SO_TIMEOUT) timeout,
 *     in milliseconds
 * @param conf configuration, used to size the buffered output stream
 * @param saslClient client that negotiates SASL on the new connection
 * @param socketFactory factory used to create the raw socket
 * @param connectToDnViaHostname if true, dial the datanode by hostname
 *     rather than by IP address
 * @param dekFactory supplies data encryption keys to the SASL handshake
 * @param blockToken block access token presented to the datanode
 * @return stream pair wrapping the negotiated streams; the output side is
 *     buffered, the input side is a DataInputStream
 * @throws IOException if connecting or the SASL handshake fails
 */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
                                       Configuration conf,
                                       SaslDataTransferClient saslClient,
                                       SocketFactory socketFactory,
                                       boolean connectToDnViaHostname,
                                       DataEncryptionKeyFactory dekFactory,
                                       Token<BlockTokenIdentifier> blockToken)
    throws IOException {

  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    LOG.debug("Connecting to datanode {}", dnAddr);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut,
        unbufIn, dekFactory, blockToken, dn);

    IOStreamPair result = new IOStreamPair(
        new DataInputStream(pair.in),
        new DataOutputStream(new BufferedOutputStream(pair.out,
            NuCypherExtUtilClient.getSmallBufferSize(conf)))
    );

    success = true;
    return result;
  } finally {
    // On any failure path, close the half-open socket; on success the
    // caller takes ownership of the socket via the returned streams.
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:42,代码来源:NuCypherExtUtilClient.java

示例2: connectToDN

import org.apache.hadoop.io.IOUtils; //导入方法依赖的package包/类
/**
 * Connect to the given datanode's data transfer port, and return the
 * resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  Socket sock = null;
  boolean established = false;
  try {
    sock = socketFactory.createSocket();
    final String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    final OutputStream rawOut = NetUtils.getOutputStream(sock);
    final InputStream rawIn = NetUtils.getInputStream(sock);
    final IOStreamPair streams = saslClient.newSocketSend(
        sock, rawOut, rawIn, this, lb.getBlockToken(), dn);
    established = true;
    return streams;
  } finally {
    // Close the socket only if we failed before handing it to the caller.
    if (!established) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:DFSClient.java

示例3: newConnectedPeer

import org.apache.hadoop.io.IOUtils; //导入方法依赖的package包/类
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Socket socket = null;
  Peer connectedPeer = null;
  boolean ok = false;
  try {
    socket = socketFactory.createSocket();
    NetUtils.connect(socket, addr,
        getRandomLocalInterfaceAddr(),
        dfsClientConf.socketTimeout);
    connectedPeer = TcpPeerServer.peerFromSocketAndKey(
        saslClient, socket, this, blockToken, datanodeId);
    connectedPeer.setReadTimeout(dfsClientConf.socketTimeout);
    ok = true;
    return connectedPeer;
  } finally {
    if (!ok) {
      // Tear down both the half-built peer and the raw socket on failure.
      IOUtils.cleanup(LOG, connectedPeer);
      IOUtils.closeSocket(socket);
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:25,代码来源:DFSClient.java

示例4: doIpcVersionTest

import org.apache.hadoop.io.IOUtils; //导入方法依赖的package包/类
/**
 * Starts a one-handler test IPC server, writes the given raw request bytes
 * to it over a plain socket, reads the raw response until EOF, and asserts
 * that the response matches the expected bytes.
 *
 * @param requestData raw bytes written to the server connection
 * @param expectedResponse raw bytes the server is expected to send back
 * @throws IOException if connecting to or talking to the server fails
 */
private void doIpcVersionTest(
    byte[] requestData,
    byte[] expectedResponse) throws IOException {
  Server server = new TestServer(1, true);
  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  server.start();
  try {
    // Fix: the socket was previously created outside any try block, so a
    // failure before entering it would leak the started server. The server
    // is now stopped unconditionally by the outer finally.
    Socket socket = new Socket();
    try {
      NetUtils.connect(socket, addr, 5000);

      OutputStream out = socket.getOutputStream();
      InputStream in = socket.getInputStream();
      out.write(requestData, 0, requestData.length);
      out.flush();
      // Read the whole response until the server closes the connection.
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      IOUtils.copyBytes(in, baos, 256);

      byte[] responseData = baos.toByteArray();

      // Compare as hex strings for a readable assertion-failure message.
      assertEquals(
          StringUtils.byteToHexString(expectedResponse),
          StringUtils.byteToHexString(responseData));
    } finally {
      IOUtils.closeSocket(socket);
    }
  } finally {
    server.stop();
  }
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:29,代码来源:TestIPC.java

示例5: startSingleTemporaryRedirectResponseThread

import org.apache.hadoop.io.IOUtils; //导入方法依赖的package包/类
/**
 * Starts a background thread that accepts one and only one client connection
 * on the server socket, sends an HTTP 307 Temporary Redirect response, and
 * then exits.  This is useful for testing timeouts on the second step of
 * methods that issue 2 HTTP requests (request 1, redirect, request 2).
 * 
 * For handling the first request, this method sets socket timeout to use the
 * initial values defined in URLUtils.  Afterwards, it guarantees that the
 * second request will use a very short timeout.
 * 
 * Optionally, the thread may consume the connection backlog immediately after
 * receiving its one and only client connection.  This is useful for forcing a
 * connection timeout on the second request.
 * 
 * On tearDown, open client connections are closed, and the thread is joined.
 * 
 * @param consumeConnectionBacklog boolean whether or not to consume connection
 *   backlog and thus force a connection timeout on the second request
 */
private void startSingleTemporaryRedirectResponseThread(
    final boolean consumeConnectionBacklog) {
  // Use the default (long-timeout) connection factory for the first request;
  // it is swapped back to the test's short-timeout factory below, right
  // after the single connection is accepted.
  fs.connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
  serverThread = new Thread() {
    @Override
    public void run() {
      Socket clientSocket = null;
      OutputStream out = null;
      InputStream in = null;
      InputStreamReader isr = null;
      BufferedReader br = null;
      try {
        // Accept one and only one client connection.
        clientSocket = serverSocket.accept();

        // Immediately setup conditions for subsequent connections.
        fs.connectionFactory = connectionFactory;
        if (consumeConnectionBacklog) {
          consumeConnectionBacklog();
        }

        // Consume client's HTTP request by reading until EOF or empty line.
        in = clientSocket.getInputStream();
        isr = new InputStreamReader(in);
        br = new BufferedReader(isr);
        for (;;) {
          String line = br.readLine();
          if (line == null || line.isEmpty()) {
            break;
          }
        }

        // Write response.
        // NOTE(review): temporaryRedirect() presumably returns the full HTTP
        // 307 response text; its definition is not visible here.
        out = clientSocket.getOutputStream();
        out.write(temporaryRedirect().getBytes("UTF-8"));
      } catch (IOException e) {
        // Fail the test on any I/O error in the server thread.
        LOG.error("unexpected IOException in server thread", e);
        fail("unexpected IOException in server thread: " + e);
      } finally {
        // Clean it all up.  Close the outermost wrapper first (br), then the
        // underlying streams, then the socket itself.
        IOUtils.cleanup(LOG, br, isr, in, out);
        IOUtils.closeSocket(clientSocket);
      }
    }
  };
  serverThread.start();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:68,代码来源:TestWebHdfsTimeouts.java


注:本文中的org.apache.hadoop.io.IOUtils.closeSocket方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。