

Java PipelineAck Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck. If you are wondering what PipelineAck is for, how to use it, or what real-world code that uses it looks like, the curated examples below should help.


The PipelineAck class belongs to the org.apache.hadoop.hdfs.protocol.datatransfer package. Fifteen code examples of the class are shown below, sorted by popularity.
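Before working through the examples, here is a minimal, self-contained sketch of the class's basic serialization round trip. It assumes a Hadoop 2.7-era API (the int[] header-flag constructor, combineHeader, and the DataTransferProtos.Status enum, all of which appear in the examples below); treat it as an illustration, not canonical usage.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class PipelineAckRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build an ack for sequence number 100 carrying a single SUCCESS reply
    // with ECN disabled.
    PipelineAck ack = new PipelineAck(100, new int[] {
        PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)
    });

    // Serialize the ack the way a datanode would when replying upstream.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    ack.write(new DataOutputStream(bytes));

    // Read it back on the other side and inspect the decoded fields.
    PipelineAck received = new PipelineAck();
    received.readFields(new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println("seqno=" + received.getSeqno()
        + ", success=" + received.isSuccess());
  }
}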

Example 1: writeZeroLengthPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);   // zero checksum

  // finally, write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] {
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)
  }).write(recvOut);
  sendRecvData(description, false);
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestDataTransferProtocol.java

Example 2: writeZeroLengthPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // offset in block
    100,                 // sequence number
    true,                // last packet in block
    0,                   // chunk length
    false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);   // zero checksum

  // finally, write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 18, Source: TestDataTransferProtocol.java
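Examples 1 and 2 are the same test method against two different releases, and the constructor they call is the key difference: older builds (such as the CDH 5.4 line above) take a Status[] of replies, while 2.7+ packs each reply's Status together with ECN bits into an int header flag. Side by side (the version boundary is inferred from the attributions, so treat it as approximate):

// Older constructor: one Status per datanode in the pipeline.
new PipelineAck(100, new Status[] { Status.SUCCESS }).write(recvOut);

// Newer constructor: each reply is an int combining ECN bits and a Status.
new PipelineAck(100, new int[] {
    PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)
}).write(recvOut);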

Example 3: writeZeroLengthPacket

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
    throws IOException {
  PacketHeader hdr = new PacketHeader(8,  // size of packet
      block.getNumBytes(), // offset in block
      100,                 // sequence number
      true,                // last packet in block
      0,                   // chunk length
      false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);     // zero checksum

  // finally, write a block with zero length
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
 
Developer: hopshadoop, Project: hops, Lines: 17, Source: TestDataTransferProtocol.java

Example 4: channelRead0

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
@Override
protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
  Status reply = getStatus(ack);
  if (reply != Status.SUCCESS) {
    failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " +
      block + " from datanode " + ctx.channel().remoteAddress()));
    return;
  }
  if (PipelineAck.isRestartOOBStatus(reply)) {
    failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " +
      block + " from datanode " + ctx.channel().remoteAddress()));
    return;
  }
  if (ack.getSeqno() == HEART_BEAT_SEQNO) {
    return;
  }
  completed(ctx.channel());
}
 
Developer: apache, Project: hbase, Lines: 19, Source: FanOutOneBlockAsyncDFSOutput.java
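The getStatus(ack) call above is an HBase-side helper, not part of PipelineAck itself. A plausible sketch of what it does (an assumption modeled on HBase's FanOutOneBlockAsyncDFSOutputHelper): prefer the combined header flag when the datanode sent one, otherwise fall back to the legacy reply list.

// Assumed helper: extract the first reply's Status from a PipelineAckProto,
// handling both the 2.7+ flag encoding and the older reply list.
private static Status getStatus(PipelineAckProto ack) {
  return ack.getFlagCount() > 0
      ? PipelineAck.getStatusFromHeader(ack.getFlag(0))
      : ack.getReply(0);
}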

Example 5: sendOOBResponse

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
/**
 * Send an OOB response. If all acks have been sent already for the block
 * and the responder is about to close, the delivery is not guaranteed.
 * This is because the other end can close the connection independently.
 * An OOB coming from downstream will be automatically relayed upstream
 * by the responder. This method is used only by originating datanode.
 *
 * @param ackStatus the type of ack to be sent
 */
void sendOOBResponse(final Status ackStatus) throws IOException,
    InterruptedException {
  if (!running) {
    LOG.info("Cannot send OOB response " + ackStatus + 
        ". Responder not running.");
    return;
  }

  synchronized(this) {
    if (sending) {
      wait(PipelineAck.getOOBTimeout(ackStatus));
      // Didn't get my turn in time. Give up.
      if (sending) {
        throw new IOException("Could not send OOB reponse in time: "
            + ackStatus);
      }
    }
    sending = true;
  }

  LOG.info("Sending an out of band ack of type " + ackStatus);
  try {
    sendAckUpstreamUnprotected(null, PipelineAck.UNKOWN_SEQNO, 0L, 0L,
        PipelineAck.combineHeader(datanode.getECN(), ackStatus));
  } finally {
    // Let others send ack. Unless there are multiple OOB send
    // calls, there can be only one waiter, the responder thread.
    // In any case, only one needs to be notified.
    synchronized(this) {
      sending = false;
      notify();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 44, Source: BlockReceiver.java

Example 6: sendAckUpstream

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
/**
 * The wrapper for the unprotected version. This is only called by
 * the responder's run() method.
 *
 * @param ack Ack received from downstream
 * @param seqno sequence number of ack to be sent upstream
 * @param totalAckTimeNanos total ack time including all the downstream
 *          nodes
 * @param offsetInBlock offset in block for the data in packet
 * @param myHeader the local ack header
 */
private void sendAckUpstream(PipelineAck ack, long seqno,
    long totalAckTimeNanos, long offsetInBlock,
    int myHeader) throws IOException {
  try {
    // Wait for other sender to finish. Unless there is an OOB being sent,
    // the responder won't have to wait.
    synchronized(this) {
      while(sending) {
        wait();
      }
      sending = true;
    }

    try {
      if (!running) return;
      sendAckUpstreamUnprotected(ack, seqno, totalAckTimeNanos,
          offsetInBlock, myHeader);
    } finally {
      synchronized(this) {
        sending = false;
        notify();
      }
    }
  } catch (InterruptedException ie) {
    // The responder was interrupted. Make it go down without
    // interrupting the receiver (writer) thread.
    running = false;
  }
}
 
Developer: naver, Project: hadoop, Lines: 41, Source: BlockReceiver.java

Example 7: TestPipeLineAckCompatibility

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
@Test
public void TestPipeLineAckCompatibility() throws IOException {
  DataTransferProtos.PipelineAckProto proto = DataTransferProtos
      .PipelineAckProto.newBuilder()
      .setSeqno(0)
      .addReply(Status.CHECKSUM_OK)
      .build();

  DataTransferProtos.PipelineAckProto newProto = DataTransferProtos
      .PipelineAckProto.newBuilder().mergeFrom(proto)
      .addFlag(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED,
                                         Status.CHECKSUM_OK))
      .build();

  ByteArrayOutputStream oldAckBytes = new ByteArrayOutputStream();
  proto.writeDelimitedTo(oldAckBytes);
  PipelineAck oldAck = new PipelineAck();
  oldAck.readFields(new ByteArrayInputStream(oldAckBytes.toByteArray()));
  assertEquals(
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.CHECKSUM_OK),
      oldAck.getHeaderFlag(0));

  PipelineAck newAck = new PipelineAck();
  ByteArrayOutputStream newAckBytes = new ByteArrayOutputStream();
  newProto.writeDelimitedTo(newAckBytes);
  newAck.readFields(new ByteArrayInputStream(newAckBytes.toByteArray()));
  assertEquals(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED,
                                         Status.CHECKSUM_OK),
               newAck.getHeaderFlag(0));
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestDataTransferProtocol.java

Example 8: sendOOBResponse

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
/**
 * Send an OOB response. If all acks have been sent already for the block
 * and the responder is about to close, the delivery is not guaranteed.
 * This is because the other end can close the connection independently.
 * An OOB coming from downstream will be automatically relayed upstream
 * by the responder. This method is used only by originating datanode.
 *
 * @param ackStatus the type of ack to be sent
 */
void sendOOBResponse(final Status ackStatus) throws IOException,
    InterruptedException {
  if (!running) {
    LOG.info("Cannot send OOB response " + ackStatus + 
        ". Responder not running.");
    return;
  }

  synchronized(this) {
    if (sending) {
      wait(datanode.getOOBTimeout(ackStatus));
      // Didn't get my turn in time. Give up.
      if (sending) {
        throw new IOException("Could not send OOB reponse in time: "
            + ackStatus);
      }
    }
    sending = true;
  }

  LOG.info("Sending an out of band ack of type " + ackStatus);
  try {
    sendAckUpstreamUnprotected(null, PipelineAck.UNKOWN_SEQNO, 0L, 0L,
        PipelineAck.combineHeader(datanode.getECN(), ackStatus));
  } finally {
    // Let others send ack. Unless there are multiple OOB send
    // calls, there can be only one waiter, the responder thread.
    // In any case, only one needs to be notified.
    synchronized(this) {
      sending = false;
      notify();
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 44, Source: BlockReceiver.java

Example 9: getECN

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
/**
 * The ECN bit for the DataNode. The DataNode should return:
 * <ul>
 *   <li>ECN.DISABLED when ECN is disabled.</li>
 *   <li>ECN.SUPPORTED when ECN is enabled but the DN still has capacity.</li>
 *   <li>ECN.CONGESTED when ECN is enabled and the DN is congested.</li>
 * </ul>
 */
public PipelineAck.ECN getECN() {
  if (!pipelineSupportECN) {
    return PipelineAck.ECN.DISABLED;
  }
  double load = ManagementFactory.getOperatingSystemMXBean()
      .getSystemLoadAverage();
  return load > NUM_CORES * CONGESTION_RATIO ? PipelineAck.ECN.CONGESTED :
      PipelineAck.ECN.SUPPORTED;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 18, Source: DataNode.java
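The value getECN() returns is not sent on its own; it is packed next to a reply Status inside a single int header flag. A small sketch of the pack/unpack cycle follows, assuming the 2.7-era helpers (combineHeader and getStatusFromHeader appear elsewhere in this article; getECNFromHeader is the matching decoder and is an assumption here):

// Pack the datanode's ECN state together with a reply status...
int flag = PipelineAck.combineHeader(PipelineAck.ECN.CONGESTED, Status.SUCCESS);

// ...and unpack both halves on the receiving side.
Status status = PipelineAck.getStatusFromHeader(flag);    // SUCCESS
PipelineAck.ECN ecn = PipelineAck.getECNFromHeader(flag); // CONGESTED (assumed decoder)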

Example 10: TestPipeLineAckCompatibility

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
@Test
public void TestPipeLineAckCompatibility() throws IOException {
  DataTransferProtos.PipelineAckProto proto = DataTransferProtos
      .PipelineAckProto.newBuilder()
      .setSeqno(0)
      .addReply(Status.CHECKSUM_OK)
      .build();

  DataTransferProtos.PipelineAckProto newProto = DataTransferProtos
      .PipelineAckProto.newBuilder().mergeFrom(proto)
      .addFlag(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED,
                                         Status.CHECKSUM_OK))
      .build();

  ByteArrayOutputStream oldAckBytes = new ByteArrayOutputStream();
  proto.writeDelimitedTo(oldAckBytes);
  PipelineAck oldAck = new PipelineAck();
  oldAck.readFields(new ByteArrayInputStream(oldAckBytes.toByteArray()));
  assertEquals(PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status
      .CHECKSUM_OK), oldAck.getHeaderFlag(0));

  PipelineAck newAck = new PipelineAck();
  ByteArrayOutputStream newAckBytes = new ByteArrayOutputStream();
  newProto.writeDelimitedTo(newAckBytes);
  newAck.readFields(new ByteArrayInputStream(newAckBytes.toByteArray()));
  assertEquals(PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED, Status
      .CHECKSUM_OK), newAck.getHeaderFlag(0));
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source: TestDataTransferProtocol.java

Example 11: testECNFlag

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
@Test
public void testECNFlag() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED, true);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    PipelineAck.ECN ecn = cluster.getDataNodes().get(0).getECN();
    Assert.assertNotEquals(PipelineAck.ECN.DISABLED, ecn);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 16, Source: TestDataNodeECN.java

Example 12: sendOOBResponse

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
/**
 * Send an OOB response. If all acks have been sent already for the block
 * and the responder is about to close, the delivery is not guaranteed.
 * This is because the other end can close the connection independently.
 * An OOB coming from downstream will be automatically relayed upstream
 * by the responder. This method is used only by originating datanode.
 *
 * @param ackStatus the type of ack to be sent
 */
void sendOOBResponse(final Status ackStatus) throws IOException,
    InterruptedException {
  if (!running) {
    LOG.info("Cannot send OOB response " + ackStatus + 
        ". Responder not running.");
    return;
  }

  synchronized(this) {
    if (sending) {
      wait(PipelineAck.getOOBTimeout(ackStatus));
      // Didn't get my turn in time. Give up.
      if (sending) {
        throw new IOException("Could not send OOB reponse in time: "
            + ackStatus);
      }
    }
    sending = true;
  }

  LOG.info("Sending an out of band ack of type " + ackStatus);
  try {
    sendAckUpstreamUnprotected(null, PipelineAck.UNKOWN_SEQNO, 0L, 0L,
        ackStatus);
  } finally {
    // Let others send ack. Unless there are multiple OOB send
    // calls, there can be only one waiter, the responder thread.
    // In any case, only one needs to be notified.
    synchronized(this) {
      sending = false;
      notify();
    }
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 44, Source: BlockReceiver.java

Example 13: sendAckUpstream

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
/**
 * The wrapper for the unprotected version. This is only called by
 * the responder's run() method.
 *
 * @param ack Ack received from downstream
 * @param seqno sequence number of ack to be sent upstream
 * @param totalAckTimeNanos total ack time including all the downstream
 *          nodes
 * @param offsetInBlock offset in block for the data in packet
 * @param myStatus the local ack status
 */
private void sendAckUpstream(PipelineAck ack, long seqno,
    long totalAckTimeNanos, long offsetInBlock,
    Status myStatus) throws IOException {
  try {
    // Wait for other sender to finish. Unless there is an OOB being sent,
    // the responder won't have to wait.
    synchronized(this) {
      while(sending) {
        wait();
      }
      sending = true;
    }

    try {
      if (!running) return;
      sendAckUpstreamUnprotected(ack, seqno, totalAckTimeNanos,
          offsetInBlock, myStatus);
    } finally {
      synchronized(this) {
        sending = false;
        notify();
      }
    }
  } catch (InterruptedException ie) {
    // The responder was interrupted. Make it go down without
    // interrupting the receiver (writer) thread.
    running = false;
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 41, Source: BlockReceiver.java

Example 14: sendOOB

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
public void sendOOB() throws IOException, InterruptedException {
  ((PacketResponder) responder.getRunnable()).sendOOBResponse(PipelineAck
      .getRestartOOBStatus());
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: BlockReceiver.java
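getRestartOOBStatus() supplies the special Status that marks a datanode restart; isRestartOOBStatus(...) is its matching predicate, which Example 4 uses on the receiving end. A two-line sanity check tying the pair together:

// The restart OOB status round-trips through its own predicate.
Status restart = PipelineAck.getRestartOOBStatus();
assert PipelineAck.isRestartOOBStatus(restart);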

Example 15: sendAckUpstreamUnprotected

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the required package/class
/**
 * @param ack Ack received from downstream
 * @param seqno sequence number of ack to be sent upstream
 * @param totalAckTimeNanos total ack time including all the downstream
 *          nodes
 * @param offsetInBlock offset in block for the data in packet
 * @param myHeader the local ack header
 */
private void sendAckUpstreamUnprotected(PipelineAck ack, long seqno,
    long totalAckTimeNanos, long offsetInBlock, int myHeader)
    throws IOException {
  final int[] replies;
  if (ack == null) {
    // A new OOB response is being sent from this node. Regardless of
    // downstream nodes, reply should contain one reply.
    replies = new int[] { myHeader };
  } else if (mirrorError) { // ack read error
    int h = PipelineAck.combineHeader(datanode.getECN(), Status.SUCCESS);
    int h1 = PipelineAck.combineHeader(datanode.getECN(), Status.ERROR);
    replies = new int[] {h, h1};
  } else {
    short ackLen = type == PacketResponderType.LAST_IN_PIPELINE ? 0 : ack
        .getNumOfReplies();
    replies = new int[ackLen + 1];
    replies[0] = myHeader;
    for (int i = 0; i < ackLen; ++i) {
      replies[i + 1] = ack.getHeaderFlag(i);
    }
    // If the mirror has reported that it received a corrupt packet,
    // do self-destruct to mark myself bad, instead of making the
    // mirror node bad. The mirror is guaranteed to be good without
    // corrupt data on disk.
    if (ackLen > 0 && PipelineAck.getStatusFromHeader(replies[1]) ==
      Status.ERROR_CHECKSUM) {
      throw new IOException("Shutting down writer and responder "
          + "since the down streams reported the data sent by this "
          + "thread is corrupt");
    }
  }
  PipelineAck replyAck = new PipelineAck(seqno, replies,
      totalAckTimeNanos);
  if (replyAck.isSuccess()
      && offsetInBlock > replicaInfo.getBytesAcked()) {
    replicaInfo.setBytesAcked(offsetInBlock);
  }
  // send my ack back to upstream datanode
  long begin = Time.monotonicNow();
  replyAck.write(upstreamOut);
  upstreamOut.flush();
  long duration = Time.monotonicNow() - begin;
  if (duration > datanodeSlowLogThresholdMs) {
    LOG.warn("Slow PacketResponder send ack to upstream took " + duration
        + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " + myString
        + ", replyAck=" + replyAck);
  } else if (LOG.isDebugEnabled()) {
    LOG.debug(myString + ", replyAck=" + replyAck);
  }

  // If a corruption was detected in the received data, terminate after
  // sending ERROR_CHECKSUM back.
  Status myStatus = PipelineAck.getStatusFromHeader(myHeader);
  if (myStatus == Status.ERROR_CHECKSUM) {
    throw new IOException("Shutting down writer and responder "
        + "due to a checksum error in received data. The error "
        + "response has been sent upstream.");
  }
}
 
Developer: naver, Project: hadoop, Lines: 68, Source: BlockReceiver.java
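To make the relay layout in Example 15 easier to see in isolation, here is a stripped-down, hypothetical helper showing how a mid-pipeline responder prepends its own header before forwarding the downstream replies (the method name and the zero ack time are illustrative, not from the original source):

// Illustrative only: a middle datanode's reply array puts its own
// header first, then the downstream headers in pipeline order.
static PipelineAck relay(PipelineAck downstreamAck, int myHeader) {
  short n = downstreamAck.getNumOfReplies();
  int[] replies = new int[n + 1];
  replies[0] = myHeader;
  for (int i = 0; i < n; i++) {
    replies[i + 1] = downstreamAck.getHeaderFlag(i);
  }
  return new PipelineAck(downstreamAck.getSeqno(), replies, 0L);
}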


Note: The org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.