

Java Status Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status. If you are wondering what the Status class does, how it is used, or what real-world code using it looks like, the curated examples below should help.


The Status class belongs to the org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos package. The sections below present 15 code examples of the Status class, ordered by popularity.
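Before diving into the examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) of the two operations on the Status protobuf enum that recur throughout this page: comparing a response status against Status.SUCCESS, and reading the enum's wire value via the protobuf-generated getNumber() accessor. The class and helper names in this sketch are illustrative only.

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class StatusDemo {
  // Hypothetical helper: did the block operation succeed?
  static boolean isSuccess(BlockOpResponseProto response) {
    // Status is a protobuf-generated enum, so identity comparison is safe.
    return response.getStatus() == Status.SUCCESS;
  }

  public static void main(String[] args) {
    // getNumber() returns the wire value declared in the .proto definition.
    System.out.println(Status.SUCCESS + " = " + Status.SUCCESS.getNumber());
    System.out.println(Status.ERROR_ACCESS_TOKEN + " = "
        + Status.ERROR_ACCESS_TOKEN.getNumber());
  }
}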

Example 1: checkBlockOpStatus

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
 
Contributor: naver, Project: hadoop, Lines: 20, Source: DataTransferProtoUtil.java
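For context, a hedged sketch of a call site for checkBlockOpStatus: parse the datanode's response from the reply stream (example 14 below uses the same parseDelimitedFrom pattern), then let the helper translate failures into exceptions. The sock, block, and logInfo values here are hypothetical.

  // Hypothetical caller: sock and block come from the surrounding transfer code.
  DataInputStream in = new DataInputStream(sock.getInputStream());
  BlockOpResponseProto response = BlockOpResponseProto.parseDelimitedFrom(in);
  // Throws InvalidBlockTokenException on ERROR_ACCESS_TOKEN, IOException on any
  // other non-SUCCESS status; returns normally on Status.SUCCESS.
  DataTransferProtoUtil.checkBlockOpStatus(response, "block=" + block);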

Example 2: PipelineAck

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Constructor
 * @param seqno sequence number
 * @param replies an array of replies
 * @param downstreamAckTimeNanos ack RTT in nanoseconds, 0 if no next DN in pipeline
 */
public PipelineAck(long seqno, int[] replies,
                   long downstreamAckTimeNanos) {
  ArrayList<Status> statusList = Lists.newArrayList();
  ArrayList<Integer> flagList = Lists.newArrayList();
  for (int r : replies) {
    statusList.add(StatusFormat.getStatus(r));
    flagList.add(r);
  }
  proto = PipelineAckProto.newBuilder()
    .setSeqno(seqno)
    .addAllReply(statusList)
    .addAllFlag(flagList)
    .setDownstreamAckTimeNanos(downstreamAckTimeNanos)
    .build();
}
 
Contributor: naver, Project: hadoop, Lines: 22, Source: PipelineAck.java
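Example 6 below exercises this constructor; as a quick illustration, each reply int is a combined header that packs an ECN flag together with a Status (a sketch, assuming the PipelineAck.combineHeader helper shown in example 6):

  // One SUCCESS reply for seqno 100; 0L because no downstream datanode acked.
  int[] replies = new int[] {
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS) };
  PipelineAck ack = new PipelineAck(100, replies, 0L);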

Example 3: getOOBStatus

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Returns the OOB status if this ack contains one. 
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status s : proto.getReplyList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) {
      return s;
    }
  }
  return null;
}
 
Contributor: naver, Project: hadoop, Lines: 20, Source: PipelineAck.java
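A hedged sketch of the consuming side: an ack is out-of-band only when its seqno is the UNKOWN_SEQNO sentinel and a reply falls inside the [OOB_START, OOB_END] range. Status.OOB_RESTART is assumed here to be such a value in the DataTransferProtos enum; the surrounding names are illustrative.

  // Build an OOB ack (seqno = UNKOWN_SEQNO) carrying an OOB_RESTART reply.
  PipelineAck ack = new PipelineAck(PipelineAck.UNKOWN_SEQNO,
      new int[] { PipelineAck.combineHeader(
          PipelineAck.ECN.DISABLED, Status.OOB_RESTART) }, 0L);
  Status oob = ack.getOOBStatus();
  if (oob != null) {
    // An upstream datanode signalled an out-of-band condition (e.g. restart).
    System.out.println("OOB status: " + oob);
  }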

Example 4: transferBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Contributor: naver, Project: hadoop, Lines: 26, Source: DataXceiver.java
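On the client side of this exchange, the request that reaches transferBlock would be issued through the Sender half of the protocol, mirroring the Sender usage in example 14. A hedged sketch; out, blk, blockToken, clientName, targets, and targetStorageTypes are assumed to come from the caller's pipeline-recovery context:

  // Hypothetical client: issue OP_TRANSFER_BLOCK to the datanode.
  new Sender(out).transferBlock(blk, blockToken, clientName,
      targets, targetStorageTypes);
  out.flush();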

Example 5: testUnalignedReads

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Test various unaligned reads to make sure that we properly
 * account even when we don't start or end on a checksum boundary
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[] { 0, 3, 129 };
  int lengths[] = new int[] { 30, 300, 512, 513, 1025 };
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
                         " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}
 
Contributor: naver, Project: hadoop, Lines: 21, Source: TestClientBlockVerification.java

Example 6: writeZeroLengthPacket

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // OffsetInBlock
    100,                 // sequencenumber
    true,                // lastPacketInBlock
    0,                   // chunk length
    false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  //ok finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] {PipelineAck.combineHeader
    (PipelineAck.ECN.DISABLED, Status.SUCCESS)}).write
    (recvOut);
  sendRecvData(description, false);
}
 
Contributor: naver, Project: hadoop, Lines: 20, Source: TestDataTransferProtocol.java
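The combined header written above packs the ECN bits and the Status into a single int. A small hedged sketch of the round trip, assuming the getStatusFromHeader and getECNFromHeader accessors that accompany combineHeader in PipelineAck:

  int header = PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS);
  // Recover the two pieces packed into the header int.
  Status status = PipelineAck.getStatusFromHeader(header);    // Status.SUCCESS
  PipelineAck.ECN ecn = PipelineAck.getECNFromHeader(header); // ECN.DISABLED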

Example 7: testWrite

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
    String description, Boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  writeBlock(block, stage, newGS, DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    //ok finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
 
Contributor: naver, Project: hadoop, Lines: 17, Source: TestDataTransferProtocol.java

Example 8: checkBlockOpStatus

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
public static void checkBlockOpStatus(
        BlockOpResponseProto response,
        String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
        "Got access token error"
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    } else {
      throw new IOException(
        "Got error"
        + ", status=" + response.getStatus().name()
        + ", status message " + response.getMessage()
        + ", " + logInfo
      );
    }
  }
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 21, Source: DataTransferProtoUtil.java

Example 9: getOOBStatus

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Returns the OOB status if this ack contains one.
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status s : proto.getReplyList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) {
      return s;
    }
  }
  return null;
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 20, Source: PipelineAck.java

Example 10: transferBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenIdentifier.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Contributor: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: DataXceiver.java

Example 11: testUnalignedReads

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Test various unaligned reads to make sure that we properly
 * account even when we don't start or end on a checksum boundary
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[]{0, 3, 129};
  int lengths[] = new int[]{30, 300, 512, 513, 1025};
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
          " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2) spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}
 
Contributor: hopshadoop, Project: hops, Lines: 21, Source: TestClientBlockVerification.java

Example 12: checkSuccess

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
              + peer.getLocalAddressString() + ", remote="
              + peer.getRemoteAddressString() + ", for file " + file
              + ", for pool " + block.getBlockPoolId() + " block " 
              + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block " 
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 22, Source: RemoteBlockReader2.java

Example 13: getOOBStatus

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Returns the OOB status if this ack contains one. 
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status reply : proto.getStatusList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (reply.getNumber() >= OOB_START && reply.getNumber() <= OOB_END) {
      return reply;
    }
  }
  return null;
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 20, Source: PipelineAck.java

Example 14: replaceBlock

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
      HdfsServerConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());

  BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
 
Contributor: hopshadoop, Project: hops, Lines: 18, Source: TestBlockReplacement.java

Example 15: writeZeroLengthPacket

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
throws IOException {
  PacketHeader hdr = new PacketHeader(
    8,                   // size of packet
    block.getNumBytes(), // OffsetInBlock
    100,                 // sequencenumber
    true,                // lastPacketInBlock
    0,                   // chunk length
    false);               // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0);           // zero checksum

  //ok finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}
 
Contributor: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 18, Source: TestDataTransferProtocol.java


Note: the org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce without permission.