This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto. If you are wondering what OpTransferBlockProto is for and how it is used in practice, the curated examples below should help.
The OpTransferBlockProto class belongs to the org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos package. Eight code examples are shown below, ordered by popularity.
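Before the examples, here is a minimal round-trip sketch showing how an OpTransferBlockProto message can be built and framed with protobuf's delimited encoding. The field names follow the HDFS datatransfer.proto/hdfs.proto definitions; the block pool id, block id, generation stamp, and client name are made-up illustrative values, and writeDelimitedTo/parseDelimitedFrom stand in here for the varint-prefixed framing the real Sender and Receiver perform.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

public class OpTransferBlockProtoRoundTrip {
  public static void main(String[] args) throws Exception {
    // Populate only the required fields; targets and targetStorageTypes stay empty.
    OpTransferBlockProto request = OpTransferBlockProto.newBuilder()
        .setHeader(ClientOperationHeaderProto.newBuilder()
            .setBaseHeader(BaseHeaderProto.newBuilder()
                .setBlock(ExtendedBlockProto.newBuilder()
                    .setPoolId("BP-example")      // hypothetical block pool id
                    .setBlockId(1073741825L)      // hypothetical block id
                    .setGenerationStamp(1001L)))
            .setClientName("example-client"))     // hypothetical client name
        .build();

    // Serialize with a varint length prefix, matching the delimited wire framing.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    request.writeDelimitedTo(out);

    // Parse it back and read a field to confirm the round trip.
    OpTransferBlockProto parsed = OpTransferBlockProto.parseDelimitedFrom(
        new ByteArrayInputStream(out.toByteArray()));
    System.out.println("client = " + parsed.getHeader().getClientName());
  }
}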
Example 1: transferBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; // import the dependent package/class
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  // Build the TRANSFER_BLOCK request and write it to the DataNode stream.
  OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
      .setHeader(DataTransferProtoUtil.buildClientHeader(
          blk, clientName, blockToken))
      .addAllTargets(PBHelper.convert(targets))
      .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes))
      .build();

  send(out, Op.TRANSFER_BLOCK, proto);
}
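The send(out, Op.TRANSFER_BLOCK, proto) helper used above is not shown in this snippet. A rough sketch of what the Sender-side helper does, assuming the usual DataTransferProtocol framing of a protocol-version short, a one-byte op code, and then the varint-length-prefixed message (details may differ between Hadoop versions):

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;

import com.google.protobuf.Message;

// Simplified sketch of the Sender helper, not the authoritative implementation.
final class SenderSketch {
  static void send(DataOutputStream out, Op opcode, Message proto) throws IOException {
    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // protocol version
    opcode.write(out);                                          // one-byte op code
    proto.writeDelimitedTo(out);                                // varint length + message bytes
    out.flush();
  }
}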
Example 2: opTransferBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; // import the dependent package/class
/** Receive {@link Op#TRANSFER_BLOCK} */
private void opTransferBlock(DataInputStream in) throws IOException {
  // Parse the varint-length-prefixed request, then delegate to transferBlock().
  final OpTransferBlockProto proto =
      OpTransferBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
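Both receiver examples call vintPrefixed(in) before parseFrom: it reads the varint length prefix written by the sender and returns a stream bounded to exactly that many bytes, so the parser does not read past the end of the message. A minimal equivalent sketch, using protobuf's CodedInputStream for the varint and Guava's ByteStreams.limit in place of HDFS's internal bounded-stream class (an assumption; the real helper lives in Hadoop's protocol utility classes):

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

import com.google.common.io.ByteStreams;
import com.google.protobuf.CodedInputStream;

final class VintPrefixedSketch {
  // Read a varint length prefix and return a stream limited to that many bytes.
  static InputStream vintPrefixed(InputStream in) throws IOException {
    int firstByte = in.read();
    if (firstByte == -1) {
      throw new EOFException("Premature EOF: no length prefix available");
    }
    int size = CodedInputStream.readRawVarint32(firstByte, in);
    return ByteStreams.limit(in, size);
  }
}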
Example 3: transferBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; // import the dependent package/class
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
      .setHeader(DataTransferProtoUtil.buildClientHeader(
          blk, clientName, blockToken))
      .addAllTargets(PBHelperClient.convert(targets))
      .addAllTargetStorageTypes(
          PBHelperClient.convertStorageTypes(targetStorageTypes))
      .build();

  send(out, Op.TRANSFER_BLOCK, proto);
}
Example 4: opTransferBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; // import the dependent package/class
/** Receive {@link Op#TRANSFER_BLOCK} */
private void opTransferBlock(DataInputStream in) throws IOException {
  final OpTransferBlockProto proto =
      OpTransferBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelperClient.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    transferBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelperClient.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelperClient.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
Example 5: transferBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; // import the dependent package/class
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets) throws IOException {
  OpTransferBlockProto proto = OpTransferBlockProto.newBuilder()
      .setHeader(DataTransferProtoUtil.buildClientHeader(
          blk, clientName, blockToken))
      .addAllTargets(PBHelper.convert(targets))
      .build();

  send(out, Op.TRANSFER_BLOCK, proto);
}
Example 6: opTransferBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; // import the dependent package/class
/** Receive {@link Op#TRANSFER_BLOCK} */
private void opTransferBlock(DataInputStream in) throws IOException {
  final OpTransferBlockProto proto =
      OpTransferBlockProto.parseFrom(vintPrefixed(in));
  transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()));
}
Example 7: transferBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; // import the dependent package/class
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken, final String clientName,
    final DatanodeInfo[] targets) throws IOException {
  OpTransferBlockProto proto = OpTransferBlockProto.newBuilder().setHeader(
      DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
      .addAllTargets(PBHelper.convert(targets)).build();

  send(out, Op.TRANSFER_BLOCK, proto);
}
Example 8: opTransferBlock
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; // import the dependent package/class
/**
 * Receive {@link Op#TRANSFER_BLOCK}
 */
private void opTransferBlock(DataInputStream in) throws IOException {
  final OpTransferBlockProto proto =
      OpTransferBlockProto.parseFrom(vintPrefixed(in));
  transferBlock(
      PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
      PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
      proto.getHeader().getClientName(),
      PBHelper.convert(proto.getTargetsList()));
}