Java PipelineAck.isSuccess Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.isSuccess. If you want to know what PipelineAck.isSuccess does and how it is used in practice, the selected examples below should help; you can also explore other usage examples of org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.


The following shows 3 code examples of the PipelineAck.isSuccess method, sorted by popularity.
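Before the full examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how a PipelineAck is typically built and checked with isSuccess. It uses the Status[]-based constructor seen in Examples 2 and 3; the DataTransferProtos import path, the class name, and getSeqno() in the print statement are illustrative assumptions. Newer Hadoop versions (as in Example 1) pack replies into int header flags instead.

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class PipelineAckIsSuccessSketch {
  public static void main(String[] args) {
    // One SUCCESS reply for this node; seqno and ack time are arbitrary here.
    Status[] replies = new Status[] { Status.SUCCESS };
    PipelineAck ack = new PipelineAck(42L, replies, 0L);

    // isSuccess() is true only when every reply in the ack is SUCCESS.
    if (ack.isSuccess()) {
      System.out.println("ack " + ack.getSeqno() + " reported success");
    }
  }
}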

Example 1: sendAckUpstreamUnprotected

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the class this method depends on
/**
 * @param ack Ack received from downstream
 * @param seqno sequence number of ack to be sent upstream
 * @param totalAckTimeNanos total ack time including all the downstream
 *          nodes
 * @param offsetInBlock offset in block for the data in packet
 * @param myHeader the local ack header
 */
private void sendAckUpstreamUnprotected(PipelineAck ack, long seqno,
    long totalAckTimeNanos, long offsetInBlock, int myHeader)
    throws IOException {
  final int[] replies;
  if (ack == null) {
    // A new OOB response is being sent from this node. Regardless of
    // downstream nodes, reply should contain one reply.
    replies = new int[] { myHeader };
  } else if (mirrorError) { // ack read error
    int h = PipelineAck.combineHeader(datanode.getECN(), Status.SUCCESS);
    int h1 = PipelineAck.combineHeader(datanode.getECN(), Status.ERROR);
    replies = new int[] {h, h1};
  } else {
    short ackLen = type == PacketResponderType.LAST_IN_PIPELINE ? 0 : ack
        .getNumOfReplies();
    replies = new int[ackLen + 1];
    replies[0] = myHeader;
    for (int i = 0; i < ackLen; ++i) {
      replies[i + 1] = ack.getHeaderFlag(i);
    }
    // If the mirror has reported that it received a corrupt packet,
    // do self-destruct to mark myself bad, instead of making the
    // mirror node bad. The mirror is guaranteed to be good without
    // corrupt data on disk.
    if (ackLen > 0 && PipelineAck.getStatusFromHeader(replies[1]) ==
      Status.ERROR_CHECKSUM) {
      throw new IOException("Shutting down writer and responder "
          + "since the down streams reported the data sent by this "
          + "thread is corrupt");
    }
  }
  PipelineAck replyAck = new PipelineAck(seqno, replies,
      totalAckTimeNanos);
  if (replyAck.isSuccess()
      && offsetInBlock > replicaInfo.getBytesAcked()) {
    replicaInfo.setBytesAcked(offsetInBlock);
  }
  // send my ack back to upstream datanode
  long begin = Time.monotonicNow();
  replyAck.write(upstreamOut);
  upstreamOut.flush();
  long duration = Time.monotonicNow() - begin;
  if (duration > datanodeSlowLogThresholdMs) {
    LOG.warn("Slow PacketResponder send ack to upstream took " + duration
        + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " + myString
        + ", replyAck=" + replyAck);
  } else if (LOG.isDebugEnabled()) {
    LOG.debug(myString + ", replyAck=" + replyAck);
  }

  // If a corruption was detected in the received data, terminate after
  // sending ERROR_CHECKSUM back.
  Status myStatus = PipelineAck.getStatusFromHeader(myHeader);
  if (myStatus == Status.ERROR_CHECKSUM) {
    throw new IOException("Shutting down writer and responder "
        + "due to a checksum error in received data. The error "
        + "response has been sent upstream.");
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 68, Source: BlockReceiver.java
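Example 1 (from a newer Hadoop line) packs the local ECN value and Status into a single int header flag with PipelineAck.combineHeader, and later recovers the Status with PipelineAck.getStatusFromHeader. Below is a minimal sketch of that round trip; combineHeader and getStatusFromHeader are the calls used in the example above, while the PipelineAck.ECN import, the ECN.SUPPORTED value, and the standalone class are assumptions for illustration.

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.ECN;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class HeaderFlagSketch {
  public static void main(String[] args) {
    // Pack ECN and Status into one header flag, as Example 1 does for replies.
    int header = PipelineAck.combineHeader(ECN.SUPPORTED, Status.SUCCESS);

    // Recover the Status, as Example 1 does when checking for ERROR_CHECKSUM.
    Status status = PipelineAck.getStatusFromHeader(header);
    System.out.println("status from header: " + status); // SUCCESS
  }
}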

Example 2: sendAckUpstreamUnprotected

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the class this method depends on
/**
 * @param ack Ack received from downstream
 * @param seqno sequence number of ack to be sent upstream
 * @param totalAckTimeNanos total ack time including all the downstream
 *          nodes
 * @param offsetInBlock offset in block for the data in packet
 * @param myStatus the local ack status
 */
private void sendAckUpstreamUnprotected(PipelineAck ack, long seqno,
    long totalAckTimeNanos, long offsetInBlock, Status myStatus)
    throws IOException {
  Status[] replies = null;
  if (ack == null) {
    // A new OOB response is being sent from this node. Regardless of
    // downstream nodes, reply should contain one reply.
    replies = new Status[1];
    replies[0] = myStatus;
  } else if (mirrorError) { // ack read error
    replies = MIRROR_ERROR_STATUS;
  } else {
    short ackLen = type == PacketResponderType.LAST_IN_PIPELINE ? 0 : ack
        .getNumOfReplies();
    replies = new Status[1 + ackLen];
    replies[0] = myStatus;
    for (int i = 0; i < ackLen; i++) {
      replies[i + 1] = ack.getReply(i);
    }
    // If the mirror has reported that it received a corrupt packet,
    // do self-destruct to mark myself bad, instead of making the 
    // mirror node bad. The mirror is guaranteed to be good without
    // corrupt data on disk.
    if (ackLen > 0 && replies[1] == Status.ERROR_CHECKSUM) {
      throw new IOException("Shutting down writer and responder "
          + "since the down streams reported the data sent by this "
          + "thread is corrupt");
    }
  }
  PipelineAck replyAck = new PipelineAck(seqno, replies,
      totalAckTimeNanos);
  if (replyAck.isSuccess()
      && offsetInBlock > replicaInfo.getBytesAcked()) {
    replicaInfo.setBytesAcked(offsetInBlock);
  }
  // send my ack back to upstream datanode
  long begin = Time.monotonicNow();
  replyAck.write(upstreamOut);
  upstreamOut.flush();
  long duration = Time.monotonicNow() - begin;
  if (duration > datanodeSlowLogThresholdMs) {
    LOG.warn("Slow PacketResponder send ack to upstream took " + duration
        + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " + myString
        + ", replyAck=" + replyAck);
  } else if (LOG.isDebugEnabled()) {
    LOG.debug(myString + ", replyAck=" + replyAck);
  }

  // If a corruption was detected in the received data, terminate after
  // sending ERROR_CHECKSUM back. 
  if (myStatus == Status.ERROR_CHECKSUM) {
    throw new IOException("Shutting down writer and responder "
        + "due to a checksum error in received data. The error "
        + "response has been sent upstream.");
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 65, Source: BlockReceiver.java
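Examples 2 and 3 reference a MIRROR_ERROR_STATUS constant that is not shown in the excerpts. Judging from the equivalent int-header branch in Example 1 (SUCCESS for this node, ERROR for the failed mirror), it is presumably a BlockReceiver field along the lines of the sketch below; treat the exact definition as an assumption rather than the project's source.

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

class MirrorErrorStatusSketch {
  // Assumed shape of BlockReceiver's MIRROR_ERROR_STATUS: report SUCCESS for
  // this datanode and ERROR for the downstream mirror that failed.
  static final Status[] MIRROR_ERROR_STATUS =
      new Status[] { Status.SUCCESS, Status.ERROR };
}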

Example 3: sendAckUpstreamUnprotected

import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; // import the class this method depends on
/**
 * @param ack Ack received from downstream
 * @param seqno sequence number of ack to be sent upstream
 * @param totalAckTimeNanos total ack time including all the downstream
 *          nodes
 * @param offsetInBlock offset in block for the data in packet
 * @param myStatus the local ack status
 */
private void sendAckUpstreamUnprotected(PipelineAck ack, long seqno,
    long totalAckTimeNanos, long offsetInBlock, Status myStatus)
    throws IOException {
  Status[] replies = null;
  if (ack == null) {
    // A new OOB response is being sent from this node. Regardless of
    // downstream nodes, reply should contain one reply.
    replies = new Status[1];
    replies[0] = myStatus;
  } else if (mirrorError) { // ack read error
    replies = MIRROR_ERROR_STATUS;
  } else {
    short ackLen = type == PacketResponderType.LAST_IN_PIPELINE ? 0 : ack
        .getNumOfReplies();
    replies = new Status[1 + ackLen];
    replies[0] = myStatus;
    for (int i = 0; i < ackLen; i++) {
      replies[i + 1] = ack.getReply(i);
    }
    // If the mirror has reported that it received a corrupt packet,
    // do self-destruct to mark myself bad, instead of making the 
    // mirror node bad. The mirror is guaranteed to be good without
    // corrupt data on disk.
    if (ackLen > 0 && replies[1] == Status.ERROR_CHECKSUM) {
      throw new IOException("Shutting down writer and responder "
          + "since the down streams reported the data sent by this "
          + "thread is corrupt");
    }
  }
  PipelineAck replyAck = new PipelineAck(seqno, replies,
      totalAckTimeNanos);
  if (replyAck.isSuccess()
      && offsetInBlock > replicaInfo.getBytesAcked()) {
    replicaInfo.setBytesAcked(offsetInBlock);
  }
  // send my ack back to upstream datanode
  replyAck.write(upstreamOut);
  upstreamOut.flush();
  if (LOG.isDebugEnabled()) {
    LOG.debug(myString + ", replyAck=" + replyAck);
  }

  // If a corruption was detected in the received data, terminate after
  // sending ERROR_CHECKSUM back. 
  if (myStatus == Status.ERROR_CHECKSUM) {
    throw new IOException("Shutting down writer and responder "
        + "due to a checksum error in received data. The error "
        + "response has been sent upstream.");
  }
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines of code: 59, Source: BlockReceiver.java


Note: The org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.isSuccess examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. For redistribution and use, refer to each project's License; do not reproduce without permission.