

Java NetUtils.getInputStream Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.net.NetUtils.getInputStream. If you are wondering what NetUtils.getInputStream does, how to call it, or where to see it used in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.net.NetUtils.


Four code examples of NetUtils.getInputStream are shown below, ordered by popularity by default.
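Before the examples, here is a minimal, self-contained sketch of what NetUtils.getInputStream (together with its companion NetUtils.getOutputStream) is for. The host, port, and timeout below are illustrative assumptions only. Unlike Socket#getInputStream, the NetUtils wrappers honor the socket's timeout even when the socket is backed by an NIO channel, which is why all four examples below use them:

import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;

import org.apache.hadoop.net.NetUtils;

public class NetUtilsStreamSketch {
  public static void main(String[] args) throws Exception {
    Socket sock = new Socket();
    try {
      // Connect with a 5-second timeout, then give reads the same budget.
      NetUtils.connect(sock, new InetSocketAddress("localhost", 8020), 5000);
      sock.setSoTimeout(5000);

      // Timeout-aware wrappers: a read that stalls past SO_TIMEOUT throws
      // SocketTimeoutException instead of blocking forever.
      InputStream in = NetUtils.getInputStream(sock);
      OutputStream out = NetUtils.getOutputStream(sock);
      // ... exchange data over in/out ...
    } finally {
      sock.close();
    }
  }
}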

Example 1: connectToDN

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
                                       Configuration conf,
                                       SaslDataTransferClient saslClient,
                                       SocketFactory socketFactory,
                                       boolean connectToDnViaHostname,
                                       DataEncryptionKeyFactory dekFactory,
                                       Token<BlockTokenIdentifier> blockToken)
    throws IOException {

  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    LOG.debug("Connecting to datanode {}", dnAddr);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut,
        unbufIn, dekFactory, blockToken, dn);

    IOStreamPair result = new IOStreamPair(
        new DataInputStream(pair.in),
        new DataOutputStream(new BufferedOutputStream(pair.out,
            NuCypherExtUtilClient.getSmallBufferSize(conf)))
    );

    success = true;
    return result;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 42, Source file: NuCypherExtUtilClient.java
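A note on the wrapping order above: the raw, unbuffered streams are handed to SaslDataTransferClient.newSocketSend so that SASL negotiation (and any encryption wrapping) operates on exactly the bytes on the wire; buffering is layered on only afterwards, around the pair that negotiation returns. Buffering first would risk the handshake reading ahead past its own messages.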

Example 2: connectToDN

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 30, Source file: DFSClient.java
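This is the upstream variant of Example 1: the same connect/wrap/negotiate sequence, but the collaborators (socketFactory, saslClient, and the connect-via-hostname setting) come from the enclosing DFSClient instance rather than being passed in as parameters.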

Example 3: transferRbw

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source file: DFSTestUtil.java
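transferRbw returns the datanode's BlockOpResponseProto and leaves checking it to the caller. A caller would typically verify the status field before asserting anything about replica state; the helper below is a hypothetical addition for illustration, not part of DFSTestUtil:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

class TransferRbwAssertions {
  // Hypothetical helper: fail the test unless the datanode accepted
  // the transferBlock request sent by transferRbw.
  static void assertTransferSucceeded(BlockOpResponseProto resp) {
    assertEquals(Status.SUCCESS, resp.getStatus());
  }
}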

Example 4: setupIOstreams

import org.apache.hadoop.net.NetUtils; // import the package/class this method depends on
/**
 * Connect to the server and set up the I/O streams. It then sends a
 * header to the server and starts the connection thread that waits for
 * responses.
 */
private synchronized void setupIOstreams() throws InterruptedException {
	if (socket != null || shouldCloseConnection.get()) {
		return;
	}

	try {
		if (LOG.isDebugEnabled()) {
			LOG.debug("Connecting to " + server);
		}
		while (true) {
			setupConnection();
			InputStream inStream = NetUtils.getInputStream(socket);
			OutputStream outStream = NetUtils.getOutputStream(socket);
			writeRpcHeader(outStream);
			// (Upstream Hadoop performs SASL negotiation here, wrapping
			// inStream/outStream on success and retrying on failure; that
			// code was commented out in this fork, so the streams are used
			// unwrapped.)
			this.in = new DataInputStream(new BufferedInputStream(new PingInputStream(inStream)));
			this.out = new DataOutputStream(new BufferedOutputStream(outStream));
			writeHeader();

			// update last activity time
			touch();

			// start the receiver thread after the socket connection has
			// been set up
			start();
			return;
		}
	} catch (Throwable t) {
		if (t instanceof IOException) {
			markClosed((IOException) t);
		} else {
			markClosed(new IOException("Couldn't set up IO streams", t));
		}
		close();
	}
}
 
Developer ID: spafka, Project: spark_deep, Lines: 68, Source file: Client.java
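The PingInputStream used in this example is what turns the socket's SO_TIMEOUT into a liveness mechanism rather than an error. The class below is a simplified sketch of that idea, with an assumed caller-supplied ping callback; the real Hadoop class also decides, based on configuration, when to give up instead of pinging:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.SocketTimeoutException;

class PingOnTimeoutInputStream extends FilterInputStream {
  private final Runnable sendPing; // assumed callback that writes a ping frame

  PingOnTimeoutInputStream(InputStream in, Runnable sendPing) {
    super(in);
    this.sendPing = sendPing;
  }

  @Override
  public int read() throws IOException {
    while (true) {
      try {
        return super.read();
      } catch (SocketTimeoutException e) {
        sendPing.run(); // the connection is quiet, not dead: ping and retry
      }
    }
  }

  @Override
  public int read(byte[] buf, int off, int len) throws IOException {
    while (true) {
      try {
        return super.read(buf, off, len);
      } catch (SocketTimeoutException e) {
        sendPing.run();
      }
    }
  }
}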


Note: The org.apache.hadoop.net.NetUtils.getInputStream examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors; consult each project's License before distributing or reusing the code. Do not republish without permission.