This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status. If you are unsure what the Status class does or how to use it, the curated code examples below should help.
The Status class belongs to the org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos package. The following sections show 15 code examples of the Status class, sorted by popularity by default.
Example 1: checkBlockOpStatus
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/** Verifies that a block operation response reports SUCCESS; otherwise throws. */
public static void checkBlockOpStatus(
    BlockOpResponseProto response,
    String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error"
          + ", status message " + response.getMessage()
          + ", " + logInfo
      );
    } else {
      throw new IOException(
          "Got error"
          + ", status message " + response.getMessage()
          + ", " + logInfo
      );
    }
  }
}
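For context, here is a minimal caller sketch (not taken from the page itself): it assumes a DataInputStream in that already carries a DataNode block-op response, reads one length-delimited BlockOpResponseProto the same way Example 14 below does, and lets checkBlockOpStatus turn any non-SUCCESS status into an exception. The helper name readAndCheck is hypothetical.

// Hypothetical helper; `in` is assumed to be positioned at a block-op response.
static void readAndCheck(DataInputStream in, String logInfo) throws IOException {
  BlockOpResponseProto response = BlockOpResponseProto.parseDelimitedFrom(in);
  // Throws InvalidBlockTokenException or IOException unless the status is SUCCESS.
  checkBlockOpStatus(response, logInfo);
}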
Example 2: PipelineAck
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Constructor
 * @param seqno sequence number
 * @param replies an array of replies
 * @param downstreamAckTimeNanos ack RTT in nanoseconds, 0 if no next DN in pipeline
 */
public PipelineAck(long seqno, int[] replies,
    long downstreamAckTimeNanos) {
  ArrayList<Status> statusList = Lists.newArrayList();
  ArrayList<Integer> flagList = Lists.newArrayList();
  for (int r : replies) {
    statusList.add(StatusFormat.getStatus(r));
    flagList.add(r);
  }
  proto = PipelineAckProto.newBuilder()
      .setSeqno(seqno)
      .addAllReply(statusList)
      .addAllFlag(flagList)
      .setDownstreamAckTimeNanos(downstreamAckTimeNanos)
      .build();
}
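Each int in replies packs an ECN flag together with a Status; Example 6 below uses PipelineAck.combineHeader to build such a value. A minimal construction sketch, assuming only the PipelineAck API already shown on this page:

// Sketch only: one SUCCESS reply for sequence number 100, with no downstream
// DataNode in the pipeline, so downstreamAckTimeNanos is 0.
int[] replies = new int[] {
    PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)
};
PipelineAck ack = new PipelineAck(100L, replies, 0L);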
Example 3: getOOBStatus
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Returns the OOB status if this ack contains one.
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status s : proto.getReplyList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) {
      return s;
    }
  }
  return null;
}
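A receiver would normally check for an out-of-band status before matching the ack against an outstanding packet. A hedged usage sketch, assuming a PipelineAck named ack that was read off the wire; handleOOB and processNormalAck are hypothetical handlers, not part of the API shown here:

Status oob = ack.getOOBStatus();
if (oob != null) {
  // Out-of-band ack (no valid sequence number); handle it separately.
  handleOOB(oob);          // hypothetical handler
} else {
  // Normal ack: getSeqno() identifies the acknowledged packet.
  processNormalAck(ack);   // hypothetical handler
}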
Example 4: transferBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
// Handles an OP_TRANSFER_BLOCK request: checks the block token, asks the
// DataNode to re-send the replica to the given targets for pipeline recovery,
// and acknowledges the client with Status.SUCCESS.
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
Example 5: testUnalignedReads
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Test various unaligned reads to make sure that we properly
 * account even when we don't start or end on a checksum boundary
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[] { 0, 3, 129 };
  int lengths[] = new int[] { 30, 300, 512, 513, 1025 };
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
          " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}
Example 6: writeZeroLengthPacket
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
    throws IOException {
  PacketHeader hdr = new PacketHeader(
      8,                   // size of packet
      block.getNumBytes(), // offset in block
      100,                 // sequence number
      true,                // last packet in block
      0,                   // chunk length
      false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0); // zero checksum
  // ok, finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new int[] {
      PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS)
  }).write(recvOut);
  sendRecvData(description, false);
}
Example 7: testWrite
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
// Sends a write-block request for the given pipeline stage and replays the
// expected response for that stage.
private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long newGS,
    String description, Boolean eofExcepted) throws IOException {
  sendBuf.reset();
  recvBuf.reset();
  writeBlock(block, stage, newGS, DEFAULT_CHECKSUM);
  if (eofExcepted) {
    sendResponse(Status.ERROR, null, null, recvOut);
    sendRecvData(description, true);
  } else if (stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
    // ok, finally write a block with 0 len
    sendResponse(Status.SUCCESS, "", null, recvOut);
    sendRecvData(description, false);
  } else {
    writeZeroLengthPacket(block, description);
  }
}
Example 8: checkBlockOpStatus
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
public static void checkBlockOpStatus(
    BlockOpResponseProto response,
    String logInfo) throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error"
          + ", status message " + response.getMessage()
          + ", " + logInfo
      );
    } else {
      throw new IOException(
          "Got error"
          + ", status=" + response.getStatus().name()
          + ", status message " + response.getMessage()
          + ", " + logInfo
      );
    }
  }
}
Example 9: getOOBStatus
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Returns the OOB status if this ack contains one.
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status s : proto.getReplyList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) {
      return s;
    }
  }
  return null;
}
Example 10: transferBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenIdentifier.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
Example 11: testUnalignedReads
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Test various unaligned reads to make sure that we properly
 * account even when we don't start or end on a checksum boundary
 */
@Test
public void testUnalignedReads() throws Exception {
  int startOffsets[] = new int[]{0, 3, 129};
  int lengths[] = new int[]{30, 300, 512, 513, 1025};
  for (int startOffset : startOffsets) {
    for (int length : lengths) {
      DFSClient.LOG.info("Testing startOffset = " + startOffset + " and " +
          " len=" + length);
      RemoteBlockReader2 reader = (RemoteBlockReader2) spy(
          util.getBlockReader(testBlock, startOffset, length));
      util.readAndCheckEOS(reader, length, true);
      verify(reader).sendReadResult(Status.CHECKSUM_OK);
      reader.close();
    }
  }
}
Example 12: checkSuccess
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  if (status.getStatus() != Status.SUCCESS) {
    if (status.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException(
          "Got access token error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block "
          + block.getBlockId() + "_" + block.getGenerationStamp());
    } else {
      throw new IOException("Got error for OP_READ_BLOCK, self="
          + peer.getLocalAddressString() + ", remote="
          + peer.getRemoteAddressString() + ", for file " + file
          + ", for pool " + block.getBlockPoolId() + " block "
          + block.getBlockId() + "_" + block.getGenerationStamp());
    }
  }
}
Example 13: getOOBStatus
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
/**
 * Returns the OOB status if this ack contains one.
 * @return null if it is not an OOB ack.
 */
public Status getOOBStatus() {
  // Normal data transfer acks will have a valid sequence number, so
  // this will return right away in most cases.
  if (getSeqno() != UNKOWN_SEQNO) {
    return null;
  }
  for (Status reply : proto.getStatusList()) {
    // The following check is valid because protobuf guarantees to
    // preserve the ordering of enum elements.
    if (reply.getNumber() >= OOB_START && reply.getNumber() <= OOB_END) {
      return reply;
    }
  }
  return null;
}
Example 14: replaceBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
// Sends an OP_REPLACE_BLOCK request to the destination DataNode, asking it to
// copy the block from `source` via `sourceProxy`, and returns whether the
// DataNode replied with Status.SUCCESS.
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
      HdfsServerConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // send the request
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  new Sender(out).replaceBlock(block, BlockTokenSecretManager.DUMMY_TOKEN,
      source.getStorageID(), sourceProxy);
  out.flush();
  // receive the response
  DataInputStream reply = new DataInputStream(sock.getInputStream());
  BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
  return proto.getStatus() == Status.SUCCESS;
}
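A hypothetical way this test helper might be invoked (the surrounding test is assumed to have already set up block, source, sourceProxy and destination):

// Sketch only: assert that the destination DataNode reported SUCCESS.
assertTrue("replaceBlock should succeed",
    replaceBlock(block, source, sourceProxy, destination));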
Example 15: writeZeroLengthPacket
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; // import the required package/class
private void writeZeroLengthPacket(ExtendedBlock block, String description)
    throws IOException {
  PacketHeader hdr = new PacketHeader(
      8,                   // size of packet
      block.getNumBytes(), // offset in block
      100,                 // sequence number
      true,                // last packet in block
      0,                   // chunk length
      false);              // sync block
  hdr.write(sendOut);
  sendOut.writeInt(0); // zero checksum
  // ok, finally write a block with 0 len
  sendResponse(Status.SUCCESS, "", null, recvOut);
  new PipelineAck(100, new Status[]{Status.SUCCESS}).write(recvOut);
  sendRecvData(description, false);
}