This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto. If you have been wondering what GetHdfsBlockLocationsRequestProto is for and how to use it, the curated class examples below should help.
GetHdfsBlockLocationsRequestProto belongs to the package org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos. Three code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
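All three examples appear to be versions of the same method, getHdfsBlocksMetadata, from Hadoop's client-side protocol translator (ClientDatanodeProtocolTranslatorPB in the 2.x line). Before diving in, here is a minimal, self-contained sketch of building a GetHdfsBlockLocationsRequestProto by hand, using the newer field set (a shared blockPoolId plus raw block ids) seen in Example 1. The pool id and block ids are hypothetical values, and the available builder methods depend on your Hadoop version, since the message definition changed between releases.

import com.google.common.primitives.Longs;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto;

public class BuildRequestSketch {
  public static void main(String[] args) {
    // Hypothetical pool id and block ids, for illustration only.
    GetHdfsBlockLocationsRequestProto request =
        GetHdfsBlockLocationsRequestProto.newBuilder()
            .setBlockPoolId("BP-1234567890-10.0.0.1-1400000000000")
            .addAllBlockIds(Longs.asList(1073741825L, 1073741826L))
            .build();
    // Protobuf messages print as readable text, which is handy for debugging.
    System.out.println(request);
  }
}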
Example 1: getHdfsBlocksMetadata
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto; // import the required package/class
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(String blockPoolId,
    long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException {
  List<TokenProto> tokensProtos =
      new ArrayList<TokenProto>(tokens.size());
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request =
      GetHdfsBlockLocationsRequestProto.newBuilder()
          .setBlockPoolId(blockPoolId)
          .addAllBlockIds(Longs.asList(blockIds))
          .addAllTokens(tokensProtos)
          .build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<byte[]>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blockPoolId, blockIds,
      volumeIds, volumeIndexes);
}
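The response encodes the block-to-volume mapping compactly: volumeIds is the list of distinct, opaque volume identifiers on the datanode, and volumeIndexes holds one index per requested block pointing into that list. A short sketch of resolving the mapping by hand from the two locals above (the loop and printout are illustrative, not part of the Hadoop API):

// For requested block i, volumeIndexes.get(i) selects the volumeIds entry
// identifying the datanode volume (disk) that holds the replica.
for (int i = 0; i < volumeIndexes.size(); i++) {
  byte[] volumeId = volumeIds.get(volumeIndexes.get(i));
  System.out.println("block " + blockIds[i] + " -> volume "
      + java.util.Arrays.toString(volumeId));
}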
Example 2: getHdfsBlocksMetadata
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto; // import the required package/class
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException {
  // Convert to proto objects
  List<ExtendedBlockProto> blocksProtos =
      new ArrayList<ExtendedBlockProto>(blocks.size());
  List<TokenProto> tokensProtos =
      new ArrayList<TokenProto>(tokens.size());
  for (ExtendedBlock b : blocks) {
    blocksProtos.add(PBHelper.convert(b));
  }
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request =
      GetHdfsBlockLocationsRequestProto.newBuilder()
          .addAllBlocks(blocksProtos)
          .addAllTokens(tokensProtos)
          .build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<byte[]>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}),
      volumeIds, volumeIndexes);
}
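Note the difference from Example 1: this variant (judging by its field set, from an earlier Hadoop 2.x release) identifies each block with a full ExtendedBlockProto message rather than one shared blockPoolId plus raw block ids, so the request carries a pool id and generation stamp per block. Below is a hedged sketch of building this older-style request directly; the block value is hypothetical, and omitting tokens assumes block access tokens are disabled.

import java.util.Collections;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto;

public class OldStyleRequestSketch {
  public static void main(String[] args) {
    // Hypothetical single-block request in the pre-refactor message shape.
    ExtendedBlock block = new ExtendedBlock("BP-hypothetical-pool", 1073741825L);
    GetHdfsBlockLocationsRequestProto request =
        GetHdfsBlockLocationsRequestProto.newBuilder()
            .addAllBlocks(Collections.singletonList(PBHelper.convert(block)))
            .build();
    System.out.println(request);
  }
}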
Example 3: getHdfsBlocksMetadata
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto; // import the required package/class
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException {
  // Convert to proto objects
  List<ExtendedBlockProto> blocksProtos =
      new ArrayList<>(blocks.size());
  List<TokenProto> tokensProtos = new ArrayList<>(tokens.size());
  for (ExtendedBlock b : blocks) {
    blocksProtos.add(PBHelper.convert(b));
  }
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request =
      GetHdfsBlockLocationsRequestProto.newBuilder()
          .addAllBlocks(blocksProtos).addAllTokens(tokensProtos).build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[]{}),
      volumeIds, volumeIndexes);
}
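Example 3 is the same translator method as Example 2, modernized with Java 7 diamond operators but otherwise identical in behavior. In all three variants the protobuf-layer ServiceException is unwrapped into an IOException via ProtobufHelper.getRemoteException, so callers only deal with IOException. A hedged usage sketch follows: "translator" stands for an instance of the translator class the examples come from, whose construction (normally done inside DFSClient) is omitted here, and the block value and empty token list are hypothetical.

// Assumes: "translator" implements the getHdfsBlocksMetadata overload of
// Examples 2/3, and block access tokens are disabled on the cluster, so an
// empty token list is acceptable (an assumption, not verified here).
List<ExtendedBlock> blocks = Collections.singletonList(
    new ExtendedBlock("BP-hypothetical-pool", 1073741825L)); // hypothetical
List<Token<BlockTokenIdentifier>> tokens = Collections.emptyList();
try {
  HdfsBlocksMetadata metadata = translator.getHdfsBlocksMetadata(blocks, tokens);
  // On success, metadata pairs each block with the opaque id of the
  // datanode volume (disk) that stores it.
  System.out.println(metadata);
} catch (IOException e) {
  // RPC and remote failures surface here, already unwrapped from the
  // protobuf ServiceException by ProtobufHelper.getRemoteException.
  e.printStackTrace();
}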