当前位置: 首页>>代码示例>>Java>>正文


Java HdfsConstants.WRITE_TIMEOUT_EXTENSION属性代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.common.HdfsConstants.WRITE_TIMEOUT_EXTENSION属性的典型用法代码示例。如果您正苦于以下问题:Java HdfsConstants.WRITE_TIMEOUT_EXTENSION属性的具体用法?Java HdfsConstants.WRITE_TIMEOUT_EXTENSION怎么用?Java HdfsConstants.WRITE_TIMEOUT_EXTENSION使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在org.apache.hadoop.hdfs.server.common.HdfsConstants的用法示例。


在下文中一共展示了HdfsConstants.WRITE_TIMEOUT_EXTENSION属性的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getDatanodeWriteTimeout

/**
 * Return the timeout that clients should use when writing to datanodes.
 *
 * The base value comes from "dfs.datanode.socket.write.timeout" (falling
 * back to HdfsConstants.WRITE_TIMEOUT), extended by a fixed amount per
 * node in the pipeline since each extra hop adds forwarding latency.
 * A non-positive configured value disables the timeout (returns 0).
 *
 * @param numNodes the number of nodes in the pipeline.
 */
int getDatanodeWriteTimeout(int numNodes) {
  int configured =
      conf.getInt("dfs.datanode.socket.write.timeout",
                  HdfsConstants.WRITE_TIMEOUT);
  if (configured <= 0) {
    return 0; // timeout explicitly disabled via configuration
  }
  return configured + HdfsConstants.WRITE_TIMEOUT_EXTENSION * numNodes;
}
 
开发者ID:cumulusyebl,项目名称:cumulus,代码行数:12,代码来源:DFSClient.java

示例2: run

/**
 * Do the deed, write the bytes.
 *
 * Thread body for a DataNode block-transfer: streams block {@code b} to
 * the first node in {@code targets}, hand-writing the legacy inline
 * OP_WRITE_BLOCK header before sending data and checksums via
 * {@link BlockSender}.  The remaining targets (index 1..n-1) are
 * forwarded in the header so the receiver can extend the pipeline.
 * {@code xmitsInProgress} is incremented for the duration of the
 * transfer so the NameNode sees this node's transmit load.
 */
public void run() {
  xmitsInProgress.getAndIncrement();
  Socket sock = null;
  DataOutputStream out = null;
  BlockSender blockSender = null;
  
  try {
    InetSocketAddress curTarget = 
      NetUtils.createSocketAddr(targets[0].getName());
    sock = newSocket();
    NetUtils.connect(sock, curTarget, socketTimeout);
    // Read timeout scales with pipeline length: downstream nodes add latency.
    sock.setSoTimeout(targets.length * socketTimeout);

    // Write timeout likewise grows per additional downstream hop.
    long writeTimeout = socketWriteTimeout + 
                        HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
    OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
    out = new DataOutputStream(new BufferedOutputStream(baseStream, 
                                                        SMALL_BUFFER_SIZE));

    // BlockSender set up to send the whole block from offset 0.
    blockSender = new BlockSender(b, 0, b.getNumBytes(), false, false, false, 
        datanode);
    DatanodeInfo srcNode = new DatanodeInfo(dnRegistration);

    //
    // Header info -- field order is the wire format; do not reorder.
    //
    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
    out.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
    out.writeLong(b.getBlockId());
    out.writeLong(b.getGenerationStamp());
    out.writeInt(0);           // no pipelining
    out.writeBoolean(false);   // not part of recovery
    Text.writeString(out, ""); // client
    out.writeBoolean(true); // sending src node information
    srcNode.write(out); // Write src node DatanodeInfo
    // write targets
    out.writeInt(targets.length - 1);
    for (int i = 1; i < targets.length; i++) {
      targets[i].write(out);
    }
    // Block access token for the WRITE operation (dummy when tokens disabled).
    Token<BlockTokenIdentifier> accessToken = BlockTokenSecretManager.DUMMY_TOKEN;
    if (isBlockTokenEnabled) {
      accessToken = blockTokenSecretManager.generateToken(null, b,
          EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
    }
    accessToken.write(out);
    // send data & checksum
    blockSender.sendBlock(out, baseStream, null);

    // no response necessary
    LOG.info(dnRegistration + ":Transmitted block " + b + " to " + curTarget);

  } catch (IOException ie) {
    LOG.warn(dnRegistration + ":Failed to transfer " + b + " to " + targets[0].getName()
        + " got " + StringUtils.stringifyException(ie));
    // check if there are any disk problem
    datanode.checkDiskError();
    
  } finally {
    // Always release the transmit slot and close resources, even on failure.
    xmitsInProgress.getAndDecrement();
    IOUtils.closeStream(blockSender);
    IOUtils.closeStream(out);
    IOUtils.closeSocket(sock);
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:68,代码来源:DataNode.java

示例3: run

/**
 * Do the deed, write the bytes.
 *
 * Thread body for a DataNode block-transfer: streams block {@code b} to
 * the first node in {@code targets}.  Unlike older variants that wrote
 * the OP_WRITE_BLOCK header field by field, this version delegates the
 * header encoding to {@code DataTransferProtocol.Sender.opWriteBlock}.
 * {@code xmitsInProgress} is incremented for the duration of the
 * transfer so the NameNode sees this node's transmit load.
 */
public void run() {
  xmitsInProgress.getAndIncrement();
  Socket sock = null;
  DataOutputStream out = null;
  BlockSender blockSender = null;
  
  try {
    InetSocketAddress curTarget = 
      NetUtils.createSocketAddr(targets[0].getName());
    sock = newSocket();
    NetUtils.connect(sock, curTarget, socketTimeout);
    // Read timeout scales with pipeline length: downstream nodes add latency.
    sock.setSoTimeout(targets.length * socketTimeout);

    // Write timeout likewise grows per additional downstream hop.
    long writeTimeout = socketWriteTimeout + 
                        HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
    OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
    out = new DataOutputStream(new BufferedOutputStream(baseStream, 
                                                        SMALL_BUFFER_SIZE));

    // BlockSender set up to send the whole block from offset 0.
    blockSender = new BlockSender(b, 0, b.getNumBytes(), 
        false, false, false, datanode);
    DatanodeInfo srcNode = new DatanodeInfo(dnRegistration);

    //
    // Header info
    //
    // Block access token for the WRITE operation (dummy when tokens disabled).
    Token<BlockTokenIdentifier> accessToken = BlockTokenSecretManager.DUMMY_TOKEN;
    if (isBlockTokenEnabled) {
      accessToken = blockTokenSecretManager.generateToken(null, b,
      EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE));
    }
    // Sender encodes the complete OP_WRITE_BLOCK header, including targets.
    DataTransferProtocol.Sender.opWriteBlock(out,
        b, 0, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0, 0, 0, "",
        srcNode, targets, accessToken);

    // send data & checksum
    blockSender.sendBlock(out, baseStream, null);

    // no response necessary
    LOG.info(dnRegistration + ":Transmitted block " + b + " to " + curTarget);

  } catch (IOException ie) {
    LOG.warn(dnRegistration + ":Failed to transfer " + b + " to " + targets[0].getName()
        + " got " + StringUtils.stringifyException(ie));
    // check if there are any disk problem
    datanode.checkDiskError();
    
  } finally {
    // Always release the transmit slot and close resources, even on failure.
    xmitsInProgress.getAndDecrement();
    IOUtils.closeStream(blockSender);
    IOUtils.closeStream(out);
    IOUtils.closeSocket(sock);
  }
}
 
开发者ID:cumulusyebl,项目名称:cumulus,代码行数:57,代码来源:DataNode.java

示例4: executeRepairTask

/**
 * submit a tree-structured block recovery job to the root node,
 * and wait for it to complete.
 *
 * Opens a socket to the tree root, writes the recovery header and the
 * serialized tree, then blocks on a single int status reply: a negative
 * value signals failure at the root.  All streams/sockets are closed in
 * the finally block regardless of outcome.
 * 
 * @param treeRoot
 *        the newcomer
 * @param header
 *        recovery job header
 * @return
 *        true if succeeds, otherwise false
 */
boolean executeRepairTask(RecoverTreeNode treeRoot, 
		MergeBlockHeader header) {

	String rootHost = treeRoot.getHostName();
	Socket rootSocket = null;
	DataOutputStream rootOutput = null;
	DataInputStream rootInput = null;
	boolean returnvalue = false;
	
	try{
		InetSocketAddress rootAddr = NetUtils.createSocketAddr(rootHost);
		rootSocket = new Socket();

		/**
		 * To Do: We should multiply timeout according to children
		 * number of subnodes. I'm not sure the current way is OK.
		 * NOTE(review): the read extension uses a hard-coded factor of 10
		 * while the write extension uses none -- confirm this matches the
		 * expected recovery-tree fan-out.
		 */
		int timeoutValue = HdfsConstants.READ_TIMEOUT 
				+ HdfsConstants.READ_TIMEOUT_EXTENSION * 10;
		int writeTimeout = HdfsConstants.WRITE_TIMEOUT 
				+ HdfsConstants.WRITE_TIMEOUT_EXTENSION;
		NetUtils.connect(rootSocket, rootAddr, timeoutValue);
		rootSocket.setSoTimeout(timeoutValue);
		rootSocket.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
		rootOutput = new DataOutputStream(new BufferedOutputStream(
				NetUtils.getOutputStream(rootSocket, writeTimeout),
				FSConstants.SMALL_BUFFER_SIZE));
		rootInput = new DataInputStream(NetUtils.getInputStream(rootSocket));
		
		// Wire order: version+opcode, then job header, then the tree itself.
		header.writeVersionAndOpCode(rootOutput);
		header.write(rootOutput);
		treeRoot.write(rootOutput);
		rootOutput.flush();
				
		// wait for the reconstruction to complete
		int status = rootInput.readInt();
		if(status < 0) {
			throw new IOException("Root node: " + rootHost 
					+ " return error status  during reconstructing block.");
		}
			
		returnvalue = true;
	} catch (IOException ioe) {
		LOG.error("NTar: executeTreeReconstructJob: error occurred during reconstructing block: " + ioe);
		returnvalue = false;
	} finally {
		IOUtils.closeStream(rootOutput);
		IOUtils.closeStream(rootInput);
		IOUtils.closeSocket(rootSocket);
	}
	return returnvalue;
}
 
开发者ID:iVCE,项目名称:RDFS,代码行数:63,代码来源:BlockReconstructor.java

示例5: createBlockOutputStream

// Open the data pipeline to the first datanode in {@code nodes} and write
// the OP_WRITE_BLOCK header for {@code block}.  Remaining nodes are listed
// in the header so the first datanode can extend the pipeline.  On success
// sets {@code blockStream}/{@code blockReplyStream} and returns true; on
// failure records the failing node in {@code errorIndex} (when the ack
// names it), sets {@code hasError}/{@code lastException}, returns false.
private boolean createBlockOutputStream(DatanodeInfo[] nodes, String client,
                boolean recoveryFlag) {
  String firstBadLink = "";
  if (LOG.isDebugEnabled()) {
    for (int i = 0; i < nodes.length; i++) {
      LOG.debug("pipeline = " + nodes[i].getName());
    }
  }

  // persist blocks on namenode on next flush
  persistBlocks = true;

  try {
    LOG.debug("Connecting to " + nodes[0].getName());
    InetSocketAddress target = NetUtils.createSocketAddr(nodes[0].getName());
    s = socketFactory.createSocket();
    // Connect/read timeout grows 3s per pipeline node on top of the base.
    int timeoutValue = 3000 * nodes.length + socketTimeout;
    NetUtils.connect(s, target, timeoutValue);
    s.setSoTimeout(timeoutValue);
    s.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    LOG.debug("Send buf size " + s.getSendBufferSize());
    // Write timeout also scales with the number of downstream hops.
    long writeTimeout = HdfsConstants.WRITE_TIMEOUT_EXTENSION * nodes.length +
                        datanodeWriteTimeout;

    //
    // Xmit header info to datanode -- field order is the wire format.
    //
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(NetUtils.getOutputStream(s, writeTimeout), 
                                 DataNode.SMALL_BUFFER_SIZE));
    blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));

    out.writeShort( DataTransferProtocol.DATA_TRANSFER_VERSION );
    out.write( DataTransferProtocol.OP_WRITE_BLOCK );
    out.writeLong( block.getBlockId() );
    out.writeLong( block.getGenerationStamp() );
    out.writeInt( nodes.length );
    out.writeBoolean( recoveryFlag );       // recovery flag
    Text.writeString( out, client );
    out.writeBoolean(false); // Not sending src node information
    out.writeInt( nodes.length - 1 );       // downstream targets only
    for (int i = 1; i < nodes.length; i++) {
      nodes[i].write(out);
    }
    checksum.writeHeader( out );
    out.flush();

    // receive ack for connect; empty string means the whole pipeline is up
    firstBadLink = Text.readString(blockReplyStream);
    if (firstBadLink.length() != 0) {
      throw new IOException("Bad connect ack with firstBadLink " + firstBadLink);
    }

    blockStream = out;
    return true;     // success

  } catch (IOException ie) {

    LOG.info("Exception in createBlockOutputStream " + ie);

    // find the datanode that matches the failing link reported in the ack
    if (firstBadLink.length() != 0) {
      for (int i = 0; i < nodes.length; i++) {
        if (nodes[i].getName().equals(firstBadLink)) {
          errorIndex = i;
          break;
        }
      }
    }
    hasError = true;
    setLastException(ie);
    blockReplyStream = null;
    return false;  // error
  }
}
 
开发者ID:thisisvoa,项目名称:hadoop-0.20,代码行数:75,代码来源:DFSClient.java

示例6: run

/**
 * Do the deed, write the bytes.
 *
 * Thread body for a DataNode block-transfer: streams block {@code b} to
 * the first node in {@code targets}, hand-writing the legacy inline
 * OP_WRITE_BLOCK header (no block access token in this variant) before
 * sending data and checksums via {@link BlockSender}.
 * NOTE(review): unlike sibling versions, the catch block here does not
 * invoke a disk-error check after a failed transfer -- confirm that is
 * intentional for this codebase.
 */
public void run() {
  xmitsInProgress.getAndIncrement();
  Socket sock = null;
  DataOutputStream out = null;
  BlockSender blockSender = null;
  
  try {
    InetSocketAddress curTarget = 
      NetUtils.createSocketAddr(targets[0].getName());
    sock = newSocket();
    NetUtils.connect(sock, curTarget, socketTimeout);
    // Read timeout scales with pipeline length: downstream nodes add latency.
    sock.setSoTimeout(targets.length * socketTimeout);

    // Write timeout likewise grows per additional downstream hop.
    long writeTimeout = socketWriteTimeout + 
                        HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
    OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
    out = new DataOutputStream(new BufferedOutputStream(baseStream, 
                                                        SMALL_BUFFER_SIZE));

    // BlockSender set up to send the whole block from offset 0.
    blockSender = new BlockSender(b, 0, b.getNumBytes(), false, false, false, 
        datanode);
    DatanodeInfo srcNode = new DatanodeInfo(dnRegistration);

    //
    // Header info -- field order is the wire format; do not reorder.
    //
    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
    out.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
    out.writeLong(b.getBlockId());
    out.writeLong(b.getGenerationStamp());
    out.writeInt(0);           // no pipelining
    out.writeBoolean(false);   // not part of recovery
    Text.writeString(out, ""); // client
    out.writeBoolean(true); // sending src node information
    srcNode.write(out); // Write src node DatanodeInfo
    // write targets
    out.writeInt(targets.length - 1);
    for (int i = 1; i < targets.length; i++) {
      targets[i].write(out);
    }
    // send data & checksum
    blockSender.sendBlock(out, baseStream, null);

    // no response necessary
    LOG.info(dnRegistration + ":Transmitted block " + b + " to " + curTarget);

  } catch (IOException ie) {
    LOG.warn(dnRegistration + ":Failed to transfer " + b + " to " + targets[0].getName()
        + " got " + StringUtils.stringifyException(ie));
  } finally {
    // Always release the transmit slot and close resources, even on failure.
    xmitsInProgress.getAndDecrement();
    IOUtils.closeStream(blockSender);
    IOUtils.closeStream(out);
    IOUtils.closeSocket(sock);
  }
}
 
开发者ID:thisisvoa,项目名称:hadoop-0.20,代码行数:59,代码来源:DataNode.java


注:本文中的org.apache.hadoop.hdfs.server.common.HdfsConstants.WRITE_TIMEOUT_EXTENSION属性示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。