

Java PBHelper.convert Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hdfs.protocolPB.PBHelper.convert. If you have been wondering what PBHelper.convert does, how to call it, or where to find it in use, the curated code examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hdfs.protocolPB.PBHelper.


The sections below present 15 code examples of PBHelper.convert, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
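Before the examples, a quick orientation: PBHelper.convert is a heavily overloaded static helper that translates between HDFS's internal Java types (DatanodeID, DatanodeInfo, block tokens, storage types, and so on) and their protobuf wire representations, in both directions. The following is a minimal round-trip sketch, not taken from the examples below, assuming a Hadoop 2.x classpath; the UUID, address, and port values are placeholders for illustration.

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class PBHelperConvertDemo {
  public static void main(String[] args) {
    // Build a protobuf DatanodeIDProto by hand (normally it is parsed off the wire).
    DatanodeIDProto proto = DatanodeIDProto.newBuilder()
        .setIpAddr("127.0.0.1")          // placeholder address
        .setHostName("localhost")
        .setDatanodeUuid("example-uuid") // placeholder UUID
        .setXferPort(50010)
        .setInfoPort(50075)
        .setIpcPort(50020)
        .build();

    // One overload maps the protobuf message to the internal DatanodeID type...
    DatanodeID id = PBHelper.convert(proto);

    // ...and the mirror overload maps the internal type back to protobuf.
    DatanodeIDProto roundTripped = PBHelper.convert(id);
    System.out.println(id + " -> " + roundTripped.getHostName());
  }
}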

Example 1: opTransferBlock

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
/** Receive {@link Op#TRANSFER_BLOCK} */
private void opTransferBlock(DataInputStream in) throws IOException {
  final OpTransferBlockProto proto =
    OpTransferBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: Receiver.java

Example 2: opRequestShortCircuitFds

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
/** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */
private void opRequestShortCircuitFds(DataInputStream in) throws IOException {
  final OpRequestShortCircuitAccessProto proto =
    OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in));
  SlotId slotId = (proto.hasSlotId()) ? 
      PBHelper.convert(proto.getSlotId()) : null;
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    requestShortCircuitFds(PBHelper.convert(proto.getHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getToken()),
        slotId, proto.getMaxVersion(),
        proto.getSupportsReceiptVerification());
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: Receiver.java

Example 3: inferChecksumTypeByReading

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 33, Source file: DFSClient.java

Example 4: opRequestShortCircuitFds

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
/** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */
private void opRequestShortCircuitFds(DataInputStream in) throws IOException {
  final OpRequestShortCircuitAccessProto proto =
    OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in));
  SlotId slotId = (proto.hasSlotId()) ? 
      PBHelper.convert(proto.getSlotId()) : null;
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    requestShortCircuitFds(PBHelper.convert(proto.getHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getToken()),
        slotId, proto.getMaxVersion());
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Developer ID: yncxcw, Project: FlexMap, Lines of code: 17, Source file: Receiver.java

Example 5: toProto

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
public static ChecksumProto toProto(DataChecksum checksum) {
  ChecksumTypeProto type = PBHelper.convert(checksum.getChecksumType());
  // ChecksumType#valueOf never returns null
  return ChecksumProto.newBuilder()
    .setBytesPerChecksum(checksum.getBytesPerChecksum())
    .setType(type)
    .build();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 9, Source file: DataTransferProtoUtil.java

Example 6: fromProto

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
public static DataChecksum fromProto(ChecksumProto proto) {
  if (proto == null) return null;

  int bytesPerChecksum = proto.getBytesPerChecksum();
  DataChecksum.Type type = PBHelper.convert(proto.getType());
  return DataChecksum.newDataChecksum(type, bytesPerChecksum);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source file: DataTransferProtoUtil.java
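Examples 5 and 6 form a pair: toProto and fromProto delegate the checksum-type enum translation to PBHelper.convert and copy bytesPerChecksum by hand. A short round-trip sketch using the two helpers above, assuming a Hadoop 2.x classpath:

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumRoundTrip {
  public static void main(String[] args) {
    // A CRC32C checksum computed over 512-byte chunks.
    DataChecksum checksum =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);

    // Serialize to the protobuf form and back, as examples 5 and 6 do.
    ChecksumProto proto = DataTransferProtoUtil.toProto(checksum);
    DataChecksum restored = DataTransferProtoUtil.fromProto(proto);

    // The round trip preserves both the type and the chunk size.
    System.out.println(restored.getChecksumType() + " / "
        + restored.getBytesPerChecksum());
  }
}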

Example 7: opWriteBlock

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        (proto.hasCachingStrategy() ?
            getCachingStrategy(proto.getCachingStrategy()) :
            CachingStrategy.newDefaultStrategy()),
        (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
        (proto.hasPinning() ? proto.getPinning() : false),
        (PBHelper.convertBooleanList(proto.getTargetPinningsList())));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source file: Receiver.java

Example 8: doUpgrade

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
@Override
public DoUpgradeResponseProto doUpgrade(RpcController controller,
    DoUpgradeRequestProto request) throws ServiceException {
  StorageInfo si = PBHelper.convert(request.getSInfo(), NodeType.JOURNAL_NODE);
  try {
    impl.doUpgrade(convert(request.getJid()), si);
    return DoUpgradeResponseProto.getDefaultInstance();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 12, Source file: QJournalProtocolServerSideTranslatorPB.java

Example 9: canRollBack

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
@Override
public CanRollBackResponseProto canRollBack(RpcController controller,
    CanRollBackRequestProto request) throws ServiceException {
  try {
    StorageInfo si = PBHelper.convert(request.getStorage(), NodeType.JOURNAL_NODE);
    Boolean result = impl.canRollBack(convert(request.getJid()), si,
        PBHelper.convert(request.getPrevStorage(), NodeType.JOURNAL_NODE),
        request.getTargetLayoutVersion());
    return CanRollBackResponseProto.newBuilder()
        .setCanRollBack(result)
        .build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 16, Source file: QJournalProtocolServerSideTranslatorPB.java

Example 10: opWriteBlock

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        (proto.hasCachingStrategy() ?
            getCachingStrategy(proto.getCachingStrategy()) :
            CachingStrategy.newDefaultStrategy()),
        (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 28, Source file: Receiver.java

Example 11: inferChecksumTypeByReading

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    
    if (reply.getStatus() != Status.SUCCESS) {
      if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException();
      } else {
        throw new IOException("Bad response " + reply + " trying to read "
            + lb.getBlock() + " from datanode " + dn);
      }
    }
    
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 40, Source file: DFSClient.java

Example 12: inferChecksumTypeByReading

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb
 *     the located block
 * @param clientName
 *     the name of the DFSClient requesting the checksum
 * @param dn
 *     the connected datanode
 * @return the inferred checksum type
 * @throws IOException
 *     if an error occurs
 */
private static Type inferChecksumTypeByReading(String clientName,
    SocketFactory socketFactory, int socketTimeout, LocatedBlock lb,
    DatanodeInfo dn, DataEncryptionKey encryptionKey,
    boolean connectToDnViaHostname) throws IOException {
  IOStreamPair pair =
      connectToDN(socketFactory, connectToDnViaHostname, encryptionKey, dn,
          socketTimeout);

  try {
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(pair.out, HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out)
        .readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    
    if (reply.getStatus() != Status.SUCCESS) {
      if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException();
      } else {
        throw new IOException(
            "Bad response " + reply + " trying to read " + lb.getBlock() +
                " from datanode " + dn);
      }
    }
    
    return PBHelper
        .convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 51, Source file: DFSClient.java

Example 13: fromProto

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
public static DataChecksum fromProto(ChecksumProto proto) {
  if (proto == null) {
    return null;
  }

  int bytesPerChecksum = proto.getBytesPerChecksum();
  DataChecksum.Type type = PBHelper.convert(proto.getType());
  return DataChecksum.newDataChecksum(type, bytesPerChecksum);
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 10, Source file: DataTransferProtoUtil.java

Example 14: inferChecksumTypeByReading

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param clientName the name of the DFSClient requesting the checksum
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private static Type inferChecksumTypeByReading(
    String clientName, SocketFactory socketFactory, int socketTimeout,
    LocatedBlock lb, DatanodeInfo dn,
    DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
    throws IOException {
  IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
      encryptionKey, dn, socketTimeout);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    
    if (reply.getStatus() != Status.SUCCESS) {
      if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
        throw new InvalidBlockTokenException();
      } else {
        throw new IOException("Bad response " + reply + " trying to read "
            + lb.getBlock() + " from datanode " + dn);
      }
    }
    
    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 46, Source file: DFSClient.java

Example 15: requestNewShm

import org.apache.hadoop.hdfs.protocolPB.PBHelper; // import the package/class the method depends on
/**
 * Ask the DataNode for a new shared memory segment.  This function must be
 * called with the manager lock held.  We will release the lock while
 * communicating with the DataNode.
 *
 * @param clientName    The current client name.
 * @param peer          The peer to use to talk to the DataNode.
 *
 * @return              Null if the DataNode does not support shared memory
 *                        segments, or experienced an error creating the
 *                        shm.  The shared memory segment itself on success.
 * @throws IOException  If there was an error communicating over the socket.
 *                        We will not throw an IOException unless the socket
 *                        itself (or the network) is the problem.
 */
private DfsClientShm requestNewShm(String clientName, DomainPeer peer)
    throws IOException {
  final DataOutputStream out = 
      new DataOutputStream(
          new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).requestShortCircuitShm(clientName);
  ShortCircuitShmResponseProto resp = 
      ShortCircuitShmResponseProto.parseFrom(
          PBHelper.vintPrefixed(peer.getInputStream()));
  String error = resp.hasError() ? resp.getError() : "(unknown)";
  switch (resp.getStatus()) {
  case SUCCESS:
    DomainSocket sock = peer.getDomainSocket();
    byte buf[] = new byte[1];
    FileInputStream fis[] = new FileInputStream[1];
    if (sock.recvFileInputStreams(fis, buf, 0, buf.length) < 0) {
      throw new EOFException("got EOF while trying to transfer the " +
          "file descriptor for the shared memory segment.");
    }
    if (fis[0] == null) {
      throw new IOException("the datanode " + datanode + " failed to " +
          "pass a file descriptor for the shared memory segment.");
    }
    try {
      DfsClientShm shm = 
          new DfsClientShm(PBHelper.convert(resp.getId()),
              fis[0], this, peer);
      if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": createNewShm: created " + shm);
      }
      return shm;
    } finally {
      IOUtils.cleanup(LOG,  fis[0]);
    }
  case ERROR_UNSUPPORTED:
    // The DataNode just does not support short-circuit shared memory
    // access, and we should stop asking.
    LOG.info(this + ": datanode does not support short-circuit " +
        "shared memory access: " + error);
    disabled = true;
    return null;
  default:
    // The datanode experienced some kind of unexpected error when trying to
    // create the short-circuit shared memory segment.
    LOG.warn(this + ": error requesting short-circuit shared memory " +
        "access: " + error);
    return null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 65, Source file: DfsClientShmManager.java


Note: The org.apache.hadoop.hdfs.protocolPB.PBHelper.convert method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Refer to the corresponding project's license before redistributing or using the code; do not reproduce this article without permission.