

Java DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST Field Code Examples

This article collects typical usage examples of the Java field org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST. If you are wondering what this field is for or how it is used in practice, the curated examples below should help. You can also explore other usage examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.


The following shows 7 code examples of the DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST field, sorted by popularity.
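Before diving into the examples, a word on what this constant selects: PACKET_VERSION_CHECKSUM_FIRST identifies the legacy packet layout in which all per-chunk checksums are written before the chunk data, while the newer inline layout (PACKET_VERSION_CHECKSUM_INLINE, which also appears in the examples below) stores each checksum immediately after its chunk. Here is a minimal sketch of the two layouts; the numeric values are assumptions, and the real constants are defined in org.apache.hadoop.hdfs.protocol.DataTransferProtocol in hadoop-EAR.

// Illustrative sketch only -- the actual constants (and their values) live in
// org.apache.hadoop.hdfs.protocol.DataTransferProtocol in hadoop-EAR.
interface PacketVersions {
  // Legacy layout: [crc0][crc1]...[crcN-1] [chunk0][chunk1]...[chunkN-1]
  int PACKET_VERSION_CHECKSUM_FIRST = 1;  // assumed value
  // Inline layout: [chunk0][crc0] [chunk1][crc1] ... [chunkN-1][crcN-1]
  int PACKET_VERSION_CHECKSUM_INLINE = 2; // assumed value
}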

Example 1: getOutPacketVersion

int getOutPacketVersion() throws IOException {
  if (ifPacketIncludeVersion()) {
    return this.preferredPacketVersion;
  } else {
    // If the server side runs on an older version that doesn't support
    // the packet version field, fall back to the older format, in which
    // the checksum comes first.
    return DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST;
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 11, Source: DFSClient.java

Example 2: getHeartbeatPacket

static DFSOutputStreamPacket getHeartbeatPacket(
    DFSOutputStream dfsOutputStream, boolean includePktVersion,
    int packetVersion) throws IOException {
  if (packetVersion == DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST) {
    return new DFSOutputStreamPacketNonInlineChecksum(dfsOutputStream);
  } else if (!includePktVersion) {
    throw new IOException(
        "Older version doesn't support inline checksum packet format.");
  } else {
    return new DFSOutputStreamPacketInlineChecksum(dfsOutputStream);
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 12, Source: DFSOutputStreamPacketFactory.java

Example 3: getPacket

static DFSOutputStreamPacket getPacket(DFSOutputStream dfsOutputStream,
    boolean includePktVersion, int packetVersion, int pktSize,
    int chunksPerPkt, long offsetInBlock, WritePacketClientProfile profile)
    throws IOException {
  if (packetVersion == DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST) {
    return new DFSOutputStreamPacketNonInlineChecksum(dfsOutputStream,
        pktSize, chunksPerPkt, offsetInBlock, profile);
  } else if (!includePktVersion) {
    throw new IOException(
        "Older version doesn't support inline checksum packet format.");
  } else {
    return new DFSOutputStreamPacketInlineChecksum(dfsOutputStream, pktSize,
        chunksPerPkt, offsetInBlock, profile);
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 15, Source: DFSOutputStreamPacketFactory.java

Example 4: verifyChunks

/**
 * Verify multiple CRC chunks. 
 */
private void verifyChunks( byte[] dataBuf, int dataOff, int len, 
                           byte[] checksumBuf, int checksumOff,
                           int firstChunkOffset, int packetVersion) 
                           throws IOException {
  int chunkOffset = firstChunkOffset;
  while (len > 0) {
    int chunkLen = Math.min(len, bytesPerChecksum - chunkOffset);
    chunkOffset = 0;
    
    checksum.update(dataBuf, dataOff, chunkLen);
    dataOff += chunkLen;

    boolean checksumCorrect;
    if (packetVersion == DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST) {
      checksumCorrect = checksum.compare(checksumBuf, checksumOff);
      checksumOff += checksumSize;
    } else {
      // Expect packetVersion == DataTransferProtocol.PACKET_VERSION_CHECKSUM_INLINE
      checksumCorrect = checksum.compare(dataBuf, dataOff);
      dataOff += checksumSize;
    }

    if (!checksumCorrect) {
      if (srcDataNode != null) {
        try {
          LOG.info("report corrupt block " + block + " from datanode " +
                    srcDataNode + " to namenode");
          LocatedBlock lb = new LocatedBlock(block, 
                                          new DatanodeInfo[] {srcDataNode});
          datanode.reportBadBlocks(namespaceId, new LocatedBlock[] {lb});
        } catch (IOException e) {
          LOG.warn("Failed to report bad block " + block + 
                    " from datanode " + srcDataNode + " to namenode");
        }
      }
      throw new IOException("Unexpected checksum mismatch " + 
                            "while writing " + block + " from " + inAddr);
    }

    checksum.reset();
    len -= chunkLen;
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 46, Source: BlockReceiver.java
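To make the checksum-first branch above concrete, here is a self-contained sketch that builds a packet with all chunk CRCs up front and then verifies it chunk by chunk, the same way the PACKET_VERSION_CHECKSUM_FIRST branch of verifyChunks walks the buffers. It is not hadoop-EAR code: the chunk size is an assumption, java.util.zip.CRC32 stands in for DataChecksum, and the mid-chunk case (firstChunkOffset > 0) is ignored for brevity.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Random;
import java.util.zip.CRC32;

/**
 * Minimal sketch of the checksum-first packet layout: all per-chunk CRCs
 * precede the data. Constants and the CRC32 checksum type are assumptions.
 */
public class ChecksumFirstDemo {
  static final int BYTES_PER_CHECKSUM = 512; // chunk size (illustrative)
  static final int CHECKSUM_SIZE = 4;        // a CRC32 occupies 4 bytes

  /** Build a packet: all chunk CRCs first, then the raw data. */
  static byte[] buildPacket(byte[] data) {
    int numChunks = (data.length + BYTES_PER_CHECKSUM - 1) / BYTES_PER_CHECKSUM;
    ByteBuffer pkt = ByteBuffer.allocate(numChunks * CHECKSUM_SIZE + data.length);
    CRC32 crc = new CRC32();
    for (int off = 0; off < data.length; off += BYTES_PER_CHECKSUM) {
      int chunkLen = Math.min(BYTES_PER_CHECKSUM, data.length - off);
      crc.reset();
      crc.update(data, off, chunkLen);
      pkt.putInt((int) crc.getValue());
    }
    pkt.put(data);
    return pkt.array();
  }

  /** Verify chunk by chunk, as the checksum-first branch of verifyChunks does. */
  static void verify(byte[] pkt, int dataLen) throws IOException {
    int numChunks = (dataLen + BYTES_PER_CHECKSUM - 1) / BYTES_PER_CHECKSUM;
    int dataOff = numChunks * CHECKSUM_SIZE;  // data begins after all the CRCs
    ByteBuffer checksums = ByteBuffer.wrap(pkt, 0, dataOff);
    CRC32 crc = new CRC32();
    for (int off = 0; off < dataLen; off += BYTES_PER_CHECKSUM) {
      int chunkLen = Math.min(BYTES_PER_CHECKSUM, dataLen - off);
      crc.reset();
      crc.update(pkt, dataOff + off, chunkLen);
      if ((int) crc.getValue() != checksums.getInt()) {
        throw new IOException("checksum mismatch at chunk offset " + off);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] data = new byte[1300];             // 3 chunks: 512 + 512 + 276 bytes
    new Random(0).nextBytes(data);
    verify(buildPacket(data), data.length);
    System.out.println("all chunks verified");
  }
}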

Example 5: getPreferredPacketVersion

@Override
public int getPreferredPacketVersion() {
  return DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 4, Source: BlockWithChecksumFileReader.java

Example 6: initialize

private void initialize(int namespaceId, Block block, long blockLength,
    long startOffset, long length, boolean corruptChecksumOk,
    boolean chunkOffsetOK, boolean verifyChecksum, boolean transferToAllowed,
    boolean allowUpdateBlocrCrc, boolean pktIncludeVersion, boolean forceOldPktVersion,
    BlockWithChecksumFileReader.InputStreamWithChecksumFactory streamFactory,
    String clientTraceFmt) throws IOException {
  try {
    this.chunkOffsetOK = chunkOffsetOK;
    this.verifyChecksum = verifyChecksum;
    this.blockLength = blockLength;
    this.transferToAllowed = transferToAllowed;
    this.clientTraceFmt = clientTraceFmt;
    this.pktIncludeVersion = pktIncludeVersion;
    
    if (this.pktIncludeVersion && !forceOldPktVersion) {
      this.packetVersion = blockReader.getPreferredPacketVersion();
    } else {
      this.packetVersion = DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST;
    }
    
    checksum = blockReader.getChecksumToSend(blockLength);
    
    bytesPerChecksum = blockReader.getBytesPerChecksum();
    checksumSize = blockReader.getChecksumSize();
    
    if (length < 0) {
      length = blockLength;
    }

    endOffset = blockLength;
    if (startOffset < 0 || startOffset > endOffset
        || (length + startOffset) > endOffset) {
      String msg = " Offset " + startOffset + " and length " + length
          + " don't match block " + block + " ( blockLen " + endOffset + " )";
      LOG.warn("sendBlock() : " + msg);
      throw new IOException(msg);
    }

    
    offset = (startOffset - (startOffset % bytesPerChecksum));
    if (length >= 0) {
      // Make sure endOffset points to end of a checksumed chunk.
      long tmpLen = startOffset + length;
      if (tmpLen % bytesPerChecksum != 0) {
        tmpLen += (bytesPerChecksum - tmpLen % bytesPerChecksum);
      }
      if (tmpLen < endOffset) {
        endOffset = tmpLen;
      }
    }
    
    // Recalculate the block CRC only if:
    // 1. it is configured to be allowed;
    // 2. the block is finalized;
    // 3. the full block is to be read;
    // 4. there is no block CRC already cached;
    // 5. the checksum type is CRC32/CRC32C and the checksum size is 4.
    if (allowUpdateBlocrCrc &&
        (!transferToAllowed || verifyChecksum)
        && startOffset == 0
        && length >= blockLength
        && replicaToRead != null
        && !replicaToRead.hasBlockCrcInfo()
        && replicaToRead.isFinalized()
        && replicaToRead instanceof DatanodeBlockInfo
        && checksumSize == DataChecksum.DEFAULT_CHECKSUM_SIZE
        && checksum != null
        && (checksum.getChecksumType() == DataChecksum.CHECKSUM_CRC32 || checksum
            .getChecksumType() == DataChecksum.CHECKSUM_CRC32C)) {
      // Needs to recalculate block CRC
      crcUpdater = new BlockCrcUpdater(bytesPerChecksum, true);
    }
    
    seqno = 0;
    
    blockReader.initialize(offset, blockLength);
  } catch (IOException ioe) {
    IOUtils.closeStream(this);
    throw ioe;
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 81, Source: BlockSender.java

Example 7: writePacket

@Override
public void writePacket(byte pktBuf[], int len, int dataOff,
    int pktBufStartOff, int numChunks, int packetVersion) throws IOException {
  if (packetVersion != DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST) {
    throw new IOException(
        "non-inline checksum doesn't support packet version " + packetVersion);
  }
  if (len == 0) {
    return;
  }
  
  // finally, write the packet data to disk:
  blockDataWriter.write(pktBuf, dataOff, len);

  boolean lastChunkStartsFromChunkStart = false;
  if (firstChunkOffset > 0) {
    // The packet doesn't start at a chunk boundary; concatenate the
    // checksums of the two pieces of the chunk that straddles the packets.
    int crcPart2 = DataChecksum.getIntFromBytes(pktBuf, pktBufStartOff);
    partialCrcInt = CrcConcat.concatCrc(partialCrcInt, crcPart2,
        Math.min(len, bytesPerChecksum - firstChunkOffset));
    byte[] tempBuf = new byte[4];
    DataChecksum.writeIntToBuf(partialCrcInt, tempBuf, 0);
    checksumOut.write(tempBuf);
    if (numChunks > 1) {
      // write the remaining chunks' checksums.
      checksumOut.write(pktBuf, pktBufStartOff + checksumSize, (numChunks - 1)
          * checksumSize);
      lastChunkStartsFromChunkStart = true;
    }
  } else {
    checksumOut.write(pktBuf, pktBufStartOff, numChunks * checksumSize);
    lastChunkStartsFromChunkStart = true;
  }
  firstChunkOffset = (firstChunkOffset + len) % bytesPerChecksum;
  if (firstChunkOffset > 0 && lastChunkStartsFromChunkStart) {
    // The last chunk is partial but starts at a chunk boundary; remember
    // its CRC so it can be extended when the next packet arrives.
    partialCrcInt = DataChecksum.getIntFromBytes(pktBuf, pktBufStartOff
        + (numChunks - 1) * checksumSize);
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 42, Source: BlockWithChecksumFileWriter.java
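The subtle part of this example is CrcConcat.concatCrc, which merges the CRC of a partial chunk with the CRC of the bytes that complete it, so the writer never has to re-read data from disk. Below is a self-contained sketch of the same idea using the standard zlib-style CRC-32 combine; it is a generic reimplementation for illustration, not the hadoop-EAR CrcConcat API.

import java.util.zip.CRC32;

/**
 * Generic zlib-style CRC-32 combine: given crc(A), crc(B) and len(B),
 * compute crc(A + B) without touching the data again.
 */
public class CrcCombineDemo {
  // Multiply a GF(2) matrix by a vector (each matrix row is a 32-bit word).
  private static long gf2MatrixTimes(long[] mat, long vec) {
    long sum = 0;
    int i = 0;
    while (vec != 0) {
      if ((vec & 1) != 0) sum ^= mat[i];
      vec >>>= 1;
      i++;
    }
    return sum;
  }

  // square = mat * mat in GF(2).
  private static void gf2MatrixSquare(long[] square, long[] mat) {
    for (int n = 0; n < 32; n++) square[n] = gf2MatrixTimes(mat, mat[n]);
  }

  /** Returns crc32(A + B) given crc1 = crc32(A), crc2 = crc32(B), len2 = len(B). */
  static long crc32Combine(long crc1, long crc2, long len2) {
    if (len2 <= 0) return crc1;
    long[] even = new long[32];
    long[] odd = new long[32];
    odd[0] = 0xedb88320L;                  // reflected CRC-32 polynomial
    long row = 1;
    for (int n = 1; n < 32; n++) { odd[n] = row; row <<= 1; }
    gf2MatrixSquare(even, odd);            // operator for two zero bits
    gf2MatrixSquare(odd, even);            // operator for four zero bits
    do {                                   // append len2 zero bytes to crc1
      gf2MatrixSquare(even, odd);
      if ((len2 & 1) != 0) crc1 = gf2MatrixTimes(even, crc1);
      len2 >>>= 1;
      if (len2 == 0) break;
      gf2MatrixSquare(odd, even);
      if ((len2 & 1) != 0) crc1 = gf2MatrixTimes(odd, crc1);
      len2 >>>= 1;
    } while (len2 != 0);
    return crc1 ^ crc2;
  }

  public static void main(String[] args) {
    byte[] a = "hello ".getBytes();
    byte[] b = "world".getBytes();
    CRC32 ca = new CRC32(); ca.update(a);
    CRC32 cb = new CRC32(); cb.update(b);
    CRC32 cab = new CRC32(); cab.update(a); cab.update(b);
    long combined = crc32Combine(ca.getValue(), cb.getValue(), b.length);
    // Both hex values print the same: combining matches the direct CRC.
    System.out.println(Long.toHexString(cab.getValue()) + " == "
        + Long.toHexString(combined));
  }
}

Running main prints the same hash twice, confirming that combining crc("hello ") with crc("world") reproduces crc("hello world"). This is the property writePacket relies on when a chunk is split across two packets.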


Note: The org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PACKET_VERSION_CHECKSUM_FIRST examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.