

Java NetUtils.getOutputStream Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.net.NetUtils.getOutputStream. If you are wondering what NetUtils.getOutputStream does, how to call it, or where to find real-world examples, the curated code samples below may help. You can also explore further usage examples from its enclosing class, org.apache.hadoop.net.NetUtils.


The following presents 4 code examples of NetUtils.getOutputStream, sorted by popularity by default.
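Before the examples, here is a minimal standalone sketch of the method in isolation. Everything in it is illustrative: the address 127.0.0.1:50010, the 5-second timeouts, and the class name are assumptions made for the sketch, not taken from the examples below. Note that NetUtils.getOutputStream(socket, timeout) enforces the write timeout only when the socket is backed by an NIO channel; for a plain socket it falls back to socket.getOutputStream() and the timeout is ignored.

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.nio.channels.SocketChannel;

import org.apache.hadoop.net.NetUtils;

public class GetOutputStreamSketch {
  public static void main(String[] args) throws Exception {
    // Use a channel-backed socket so the timed stream wrapper applies.
    Socket sock = SocketChannel.open().socket();
    try {
      // Hypothetical address and timeout; adjust for your environment.
      NetUtils.connect(sock, NetUtils.createSocketAddr("127.0.0.1:50010"), 5000);
      sock.setSoTimeout(5000);

      // Timed overload: writes that stall longer than 5s will fail.
      OutputStream unbufOut = NetUtils.getOutputStream(sock, 5000);
      DataOutputStream out =
          new DataOutputStream(new BufferedOutputStream(unbufOut));
      out.writeInt(42); // placeholder payload
      out.flush();
    } finally {
      sock.close();
    }
  }
}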

Example 1: connectToDN

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
                                       Configuration conf,
                                       SaslDataTransferClient saslClient,
                                       SocketFactory socketFactory,
                                       boolean connectToDnViaHostname,
                                       DataEncryptionKeyFactory dekFactory,
                                       Token<BlockTokenIdentifier> blockToken)
    throws IOException {

  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    LOG.debug("Connecting to datanode {}", dnAddr);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
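    // Hand the raw, unbuffered streams to the SASL client, which may wrap
    // them for encryption before buffering is layered on top below.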
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut,
        unbufIn, dekFactory, blockToken, dn);

    IOStreamPair result = new IOStreamPair(
        new DataInputStream(pair.in),
        new DataOutputStream(new BufferedOutputStream(pair.out,
            NuCypherExtUtilClient.getSmallBufferSize(conf)))
    );

    success = true;
    return result;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 42, Source file: NuCypherExtUtilClient.java

Example 2: connectToDN

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source file: DFSClient.java

Example 3: transferRbw

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source file: DFSTestUtil.java
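Of the four examples, only this one uses the timed overload NetUtils.getOutputStream(s, writeTimeout), with the timeout scaled to the pipeline length via dfsClient.getDatanodeWriteTimeout(datanodes.length). Below is a minimal sketch of how such a per-pipeline write timeout is typically derived; the helper name and the constants are illustrative assumptions, not values taken from the example above.

// Illustrative helper: a base timeout plus a per-datanode extension,
// so longer pipelines get proportionally more time to complete a write.
static long datanodeWriteTimeout(int numNodes) {
  final long baseMillis = 8 * 60 * 1000L; // assumed base write timeout
  final long perNodeMillis = 5 * 1000L;   // assumed extension per node
  return baseMillis + perNodeMillis * numNodes;
}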

Example 4: setupIOstreams

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Connect to the server and set up the I/O streams. It then sends a
 * header to the server and starts the connection thread that waits for
 * responses.
 */
private synchronized void setupIOstreams() throws InterruptedException {
	if (socket != null || shouldCloseConnection.get()) {
		return;
	}

	try {
		if (LOG.isDebugEnabled()) {
			LOG.debug("Connecting to " + server);
		}
		/*
		 * short numRetries = 0;
		 * final short maxRetries = 15;
		 * Random rand = null;
		 */
		while (true) {
			setupConnection();
			InputStream inStream = NetUtils.getInputStream(socket);
			OutputStream outStream = NetUtils.getOutputStream(socket);
			writeRpcHeader(outStream);
			/*
			 * if (useSasl) {
			 *   final InputStream in2 = inStream;
			 *   final OutputStream out2 = outStream;
			 *   UserGroupInformation ticket = remoteId.getTicket();
			 *   if (authMethod == AuthMethod.KERBEROS) {
			 *     if (ticket.getRealUser() != null) {
			 *       ticket = ticket.getRealUser();
			 *     }
			 *   }
			 *   boolean continueSasl = false;
			 *   try {
			 *     continueSasl = ticket.doAs(new PrivilegedExceptionAction<Boolean>() {
			 *       @Override
			 *       public Boolean run() throws IOException {
			 *         return setupSaslConnection(in2, out2);
			 *       }
			 *     });
			 *   } catch (Exception ex) {
			 *     if (rand == null) {
			 *       rand = new Random();
			 *     }
			 *     handleSaslConnectionFailure(numRetries++, maxRetries, ex, rand, ticket);
			 *     continue;
			 *   }
			 *   if (continueSasl) {
			 *     // Sasl connect is successful. Let's set up Sasl i/o streams.
			 *     inStream = saslRpcClient.getInputStream(inStream);
			 *     outStream = saslRpcClient.getOutputStream(outStream);
			 *   } else {
			 *     // fall back to simple auth because server told us so.
			 *     authMethod = AuthMethod.SIMPLE;
			 *     header = new ConnectionHeader(header.getProtocol(), header.getUgi(), authMethod);
			 *     useSasl = false;
			 *   }
			 * }
			 */
			this.in = new DataInputStream(new BufferedInputStream(new PingInputStream(inStream)));
			this.out = new DataOutputStream(new BufferedOutputStream(outStream));
			writeHeader();

			// update last activity time
			touch();

			// start the receiver thread after the socket connection has
			// been set up
			start();
			return;
		}
	} catch (Throwable t) {
		if (t instanceof IOException) {
			markClosed((IOException) t);
		} else {
			markClosed(new IOException("Couldn't set up IO streams", t));
		}
		close();
	}
}
 
Developer ID: spafka, Project: spark_deep, Lines of code: 68, Source file: Client.java


Note: The org.apache.hadoop.net.NetUtils.getOutputStream method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not republish without permission.