

Java DataTransferProtoUtil Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil. If you are wondering what DataTransferProtoUtil does, how to use it, or what real-world usage looks like, the curated class examples below should help.


The DataTransferProtoUtil class belongs to the org.apache.hadoop.hdfs.protocol.datatransfer package. Fifteen code examples of the class are presented below, sorted by popularity by default.
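Nearly every snippet below leans on the same two capabilities of DataTransferProtoUtil: validating the status of a BlockOpResponseProto and converting checksum descriptors between the protobuf wire format and DataChecksum. Here is a minimal, self-contained sketch of that pattern, assuming the Hadoop 2.x-era APIs used throughout this page; the wrapper class and the helper readChecksumOrFail are hypothetical names introduced purely for illustration.

import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.util.DataChecksum;

// Hypothetical helper class, not part of Hadoop.
public class DataTransferProtoUtilSketch {
  /**
   * Read one vint-delimited block-op response from a DataNode stream
   * and fail fast on any non-SUCCESS status. checkBlockOpStatus throws
   * an IOException (an InvalidBlockTokenException for token errors)
   * whose message includes the supplied context string.
   */
  static DataChecksum readChecksumOrFail(DataInputStream in, String context)
      throws IOException {
    BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    DataTransferProtoUtil.checkBlockOpStatus(reply, context);
    // Convert the wire-format checksum descriptor back into a DataChecksum.
    return DataTransferProtoUtil.fromProto(
        reply.getReadOpChecksumInfo().getChecksum());
  }
}

Most of the examples below follow this client-side read path; Examples 5 and 9 show the inverse direction, where the DataNode uses DataTransferProtoUtil.toProto to serialize a DataChecksum into the response.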

Example 1: inferChecksumTypeByReading

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: DFSClient.java

Example 2: inferChecksumTypeByReading

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.getSocketTimeout(), lb);

  try {
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(pair.out, smallBufferSize));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " +
        dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelperClient.convert(
        reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtilsClient.cleanup(null, pair.in, pair.out);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 35, Source: DFSClient.java

Example 3: checkSuccess

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  String logInfo = "for OP_READ_BLOCK"
    + ", self=" + peer.getLocalAddressString()
    + ", remote=" + peer.getRemoteAddressString()
    + ", for file " + file
    + ", for pool " + block.getBlockPoolId()
    + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
  DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: RemoteBlockReader2.java

Example 4: receiveResponse

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/** Receive a block copy response from the input stream */
private void receiveResponse(DataInputStream in) throws IOException {
  BlockOpResponseProto response =
      BlockOpResponseProto.parseFrom(vintPrefixed(in));
  while (response.getStatus() == Status.IN_PROGRESS) {
    // read intermediate responses
    response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
  }
  String logInfo = "block move is failed";
  DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: Dispatcher.java

Example 5: writeSuccessWithChecksumInfo

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
private void writeSuccessWithChecksumInfo(BlockSender blockSender,
    DataOutputStream out) throws IOException {

  ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
    .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
    .setChunkOffset(blockSender.getOffset())
    .build();
    
  BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
    .setStatus(SUCCESS)
    .setReadOpChecksumInfo(ckInfo)
    .build();
  response.writeDelimitedTo(out);
  out.flush();
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: DataXceiver.java
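
Note that this server-side writer is the counterpart of the client-side readers elsewhere on this page: DataTransferProtoUtil.toProto serializes the DataChecksum that client code later recovers with DataTransferProtoUtil.fromProto (see Examples 8 and 10).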

Example 6: checkSuccess

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  String logInfo = "for OP_READ_BLOCK"
      + ", self=" + peer.getLocalAddressString()
      + ", remote=" + peer.getRemoteAddressString()
      + ", for file " + file
      + ", for pool " + block.getBlockPoolId()
      + " block " + block.getBlockId() + "_" + block.getGenerationStamp();
  DataTransferProtoUtil.checkBlockOpStatus(status, logInfo);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 13, Source: RemoteBlockReader2.java

Example 7: receiveResponse

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/** Receive a reportedBlock copy response from the input stream */
private void receiveResponse(DataInputStream in) throws IOException {
  BlockOpResponseProto response =
      BlockOpResponseProto.parseFrom(vintPrefixed(in));
  while (response.getStatus() == Status.IN_PROGRESS) {
    // read intermediate responses
    response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
  }
  String logInfo = "reportedBlock move is failed";
  DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 12, Source: Dispatcher.java

Example 8: newBlockReader

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param sock
 *     An established Socket to the DN. The BlockReader will not close it
 *     normally
 * @param file
 *     File location
 * @param block
 *     The block object
 * @param blockToken
 *     The block token for security
 * @param startOffset
 *     The read offset, relative to block head
 * @param len
 *     The number of bytes to read
 * @param bufferSize
 *     The IO buffer size (not the client buffer size)
 * @param verifyChecksum
 *     Whether to verify checksum
 * @param clientName
 *     Client name
 * @return New BlockReader instance, or null on error.
 */
public static RemoteBlockReader newBlockReader(Socket sock, String file,
    ExtendedBlock block, Token<BlockTokenIdentifier> blockToken,
    long startOffset, long len, int bufferSize, boolean verifyChecksum,
    String clientName) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT)));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum);
  
  //
  // Get bytes in block, set streams
  //

  DataInputStream in = new DataInputStream(
      new BufferedInputStream(NetUtils.getInputStream(sock), bufferSize));
  
  BlockOpResponseProto status =
      BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
  RemoteBlockReader2.checkSuccess(status, sock, block, file);
  ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
  DataChecksum checksum =
      DataTransferProtoUtil.fromProto(checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?
  
  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();
  
  if (firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
        firstChunkOffset + ") startOffset is " +
        startOffset + " for file " + file);
  }

  return new RemoteBlockReader(file, block.getBlockPoolId(),
      block.getBlockId(), in, checksum, verifyChecksum, startOffset,
      firstChunkOffset, len, sock);
}
 
Developer: hopshadoop, Project: hops, Lines: 65, Source: RemoteBlockReader.java

Example 9: writeSuccessWithChecksumInfo

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
private void writeSuccessWithChecksumInfo(BlockSender blockSender,
    DataOutputStream out) throws IOException {

  ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
      .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
      .setChunkOffset(blockSender.getOffset()).build();

  BlockOpResponseProto response =
      BlockOpResponseProto.newBuilder().setStatus(SUCCESS)
          .setReadOpChecksumInfo(ckInfo).build();
  response.writeDelimitedTo(out);
  out.flush();
}
 
Developer: hopshadoop, Project: hops, Lines: 14, Source: DataXceiver.java

Example 10: newBlockReader

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param bufferSize  The IO buffer size (not the client buffer size)
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @return New BlockReader instance, or null on error.
 */
public static RemoteBlockReader newBlockReader(String file,
                                   ExtendedBlock block, 
                                   Token<BlockTokenIdentifier> blockToken,
                                   long startOffset, long len,
                                   int bufferSize, boolean verifyChecksum,
                                   String clientName, Peer peer,
                                   DatanodeID datanodeID,
                                   PeerCache peerCache,
                                   CachingStrategy cachingStrategy)
                                     throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out =
      new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);
  
  //
  // Get bytes in block, set streams
  //

  DataInputStream in = new DataInputStream(
      new BufferedInputStream(peer.getInputStream(), bufferSize));
  
  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  RemoteBlockReader2.checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?
  
  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();
  
  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " + 
                          startOffset + " for file " + file);
  }

  return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
      in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
      peer, datanodeID, peerCache);
}
 
Developer: naver, Project: hadoop, Lines: 61, Source: RemoteBlockReader.java

Example 11: newBlockReader

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @param peer  The Peer to use
 * @param datanodeID  The DatanodeID this peer is connected to
 * @return New BlockReader instance, or null on error.
 */
public static BlockReader newBlockReader(String file,
                                   ExtendedBlock block,
                                   Token<BlockTokenIdentifier> blockToken,
                                   long startOffset, long len,
                                   boolean verifyChecksum,
                                   String clientName,
                                   Peer peer, DatanodeID datanodeID,
                                   PeerCache peerCache,
                                   CachingStrategy cachingStrategy) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);

  //
  // Get bytes in block
  //
  DataInputStream in = new DataInputStream(peer.getInputStream());

  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " +
                          startOffset + " for file " + file);
  }

  return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
      checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer,
      datanodeID, peerCache);
}
 
Developer: naver, Project: hadoop, Lines: 59, Source: RemoteBlockReader2.java

Example 12: newBlockReader

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param bufferSize  The IO buffer size (not the client buffer size)
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @return New BlockReader instance, or null on error.
 */
public static RemoteBlockReader newBlockReader(String file,
    ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken,
    long startOffset, long len,
    int bufferSize, boolean verifyChecksum,
    String clientName, Peer peer,
    DatanodeID datanodeID,
    PeerCache peerCache,
    CachingStrategy cachingStrategy,
    Tracer tracer)
    throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out =
      new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);

  //
  // Get bytes in block, set streams
  //

  DataInputStream in = new DataInputStream(
      new BufferedInputStream(peer.getInputStream(), bufferSize));

  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelperClient.vintPrefixed(in));
  RemoteBlockReader2.checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
      status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
        firstChunkOffset + ") startOffset is " +
        startOffset + " for file " + file);
  }

  return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
      in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
      peer, datanodeID, peerCache, tracer);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 62, Source: RemoteBlockReader.java

Example 13: newBlockReader

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @param peer  The Peer to use
 * @param datanodeID  The DatanodeID this peer is connected to
 * @return New BlockReader instance, or null on error.
 */
public static BlockReader newBlockReader(String file,
    ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken,
    long startOffset, long len,
    boolean verifyChecksum,
    String clientName,
    Peer peer, DatanodeID datanodeID,
    PeerCache peerCache,
    CachingStrategy cachingStrategy,
    Tracer tracer) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);

  //
  // Get bytes in block
  //
  DataInputStream in = new DataInputStream(peer.getInputStream());

  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelperClient.vintPrefixed(in));
  checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
      status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
        firstChunkOffset + ") startOffset is " +
        startOffset + " for file " + file);
  }

  return new RemoteBlockReader2(file, block.getBlockId(), checksum,
      verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID,
      peerCache, tracer);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 60, Source: RemoteBlockReader2.java

Example 14: newBlockReader

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param sock  An established Socket to the DN. The BlockReader will not close it normally
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param bufferSize  The IO buffer size (not the client buffer size)
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @return New BlockReader instance, or null on error.
 */
public static RemoteBlockReader newBlockReader(String file,
                                   ExtendedBlock block, 
                                   Token<BlockTokenIdentifier> blockToken,
                                   long startOffset, long len,
                                   int bufferSize, boolean verifyChecksum,
                                   String clientName, Peer peer,
                                   DatanodeID datanodeID,
                                   PeerCache peerCache,
                                   CachingStrategy cachingStrategy)
                                     throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out =
      new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);
  
  //
  // Get bytes in block, set streams
  //

  DataInputStream in = new DataInputStream(
      new BufferedInputStream(peer.getInputStream(), bufferSize));
  
  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  RemoteBlockReader2.checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?
  
  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();
  
  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " + 
                          startOffset + " for file " + file);
  }

  return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
      in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
      peer, datanodeID, peerCache);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 62, Source: RemoteBlockReader.java

Example 15: newBlockReader

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; // import the required package/class
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param sock  An established Socket to the DN. The BlockReader will not close it normally.
 *             This socket must have an associated Channel.
 * @param file  File location
 * @param block  The block object
 * @param blockToken  The block token for security
 * @param startOffset  The read offset, relative to block head
 * @param len  The number of bytes to read
 * @param verifyChecksum  Whether to verify checksum
 * @param clientName  Client name
 * @param peer  The Peer to use
 * @param datanodeID  The DatanodeID this peer is connected to
 * @return New BlockReader instance, or null on error.
 */
public static BlockReader newBlockReader(String file,
                                   ExtendedBlock block,
                                   Token<BlockTokenIdentifier> blockToken,
                                   long startOffset, long len,
                                   boolean verifyChecksum,
                                   String clientName,
                                   Peer peer, DatanodeID datanodeID,
                                   PeerCache peerCache,
                                   CachingStrategy cachingStrategy) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);

  //
  // Get bytes in block
  //
  DataInputStream in = new DataInputStream(peer.getInputStream());

  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelper.vintPrefixed(in));
  checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo =
    status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  //Warning when we get CHECKSUM_NULL?

  // Read the first chunk offset.
  long firstChunkOffset = checksumInfo.getChunkOffset();

  if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
      firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset (" +
                          firstChunkOffset + ") startOffset is " +
                          startOffset + " for file " + file);
  }

  return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
      checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer,
      datanodeID, peerCache);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 61, Source: RemoteBlockReader2.java


Note: The org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.