

Java ExtendedBlockProto Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto. If you have been wondering what the ExtendedBlockProto class is for and how to use it, the selected code examples below should help.


The ExtendedBlockProto class belongs to the org.apache.hadoop.hdfs.protocol.proto.HdfsProtos package. Nine code examples of the class are shown below, sorted by popularity by default.
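
Before the individual examples, here is a minimal round-trip sketch of the pattern most of them demonstrate: converting between the HDFS-side ExtendedBlock object and its protobuf wire form ExtendedBlockProto. This is an illustrative sketch, not code from any of the projects below; the block values are made up, and the import path assumes the Hadoop 2.x layout in which PBHelper lives in org.apache.hadoop.hdfs.protocolPB (in newer Hadoop releases the same conversions live in PBHelperClient, as Examples 3 and 5 show).

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class ExtendedBlockRoundTrip {
  public static void main(String[] args) {
    // Illustrative values: block pool id, block id, length in bytes, generation stamp.
    ExtendedBlock block = new ExtendedBlock("BP-1234-10.0.0.1-1", 1001L, 4096L, 100L);

    // Java object -> protobuf message (Example 1 / Example 3).
    ExtendedBlockProto proto = PBHelper.convert(block);

    // Protobuf message -> Java object (Example 7).
    ExtendedBlock restored = PBHelper.convert(proto);

    // The restored block equals the original; this is what Examples 2 and 5 assert.
    System.out.println(block.equals(restored)); // prints: true
  }
}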

Example 1: convert

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; // import the required package/class
public static ExtendedBlockProto convert(final ExtendedBlock b) {
  if (b == null) return null;
  return ExtendedBlockProto.newBuilder().
    setPoolId(b.getBlockPoolId()).
    setBlockId(b.getBlockId()).
    setNumBytes(b.getNumBytes()).
    setGenerationStamp(b.getGenerationStamp()).
    build();
}
 
Developer: naver | Project: hadoop | Lines: 10 | Source: PBHelper.java
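
A short follow-up to Example 1, not taken from the original project: the point of producing an ExtendedBlockProto is that the generated protobuf message can be serialized for RPC. The sketch below builds the message directly with the same builder setters as Example 1 (the values are illustrative) and round-trips it through bytes with the standard protobuf toByteArray()/parseFrom() methods; it assumes the unshaded com.google.protobuf classes used by the Hadoop versions on this page.

import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

public class ExtendedBlockProtoWireSketch {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    // Build the message with the generated builder (same setters as Example 1).
    ExtendedBlockProto proto = ExtendedBlockProto.newBuilder()
        .setPoolId("BP-1234-10.0.0.1-1")   // illustrative block pool id
        .setBlockId(1001L)
        .setNumBytes(4096L)
        .setGenerationStamp(100L)
        .build();

    // Serialize to the wire format and parse it back.
    byte[] wire = proto.toByteArray();
    ExtendedBlockProto parsed = ExtendedBlockProto.parseFrom(wire);

    // Field accessors mirror the setters (Example 7 reads them the same way).
    System.out.println(parsed.getPoolId() + " / " + parsed.getBlockId());
  }
}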

Example 2: testConvertExtendedBlock

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; // import the required package/class
@Test
public void testConvertExtendedBlock() {
  ExtendedBlock b = getExtendedBlock();
  ExtendedBlockProto bProto = PBHelper.convert(b);
  ExtendedBlock b1 = PBHelper.convert(bProto);
  assertEquals(b, b1);
  
  b.setBlockId(-1);
  bProto = PBHelper.convert(b);
  b1 = PBHelper.convert(bProto);
  assertEquals(b, b1);
}
 
Developer: naver | Project: hadoop | Lines: 13 | Source: TestPBHelper.java

Example 3: convert

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; // import the required package/class
public static ExtendedBlockProto convert(final ExtendedBlock b) {
  if (b == null) return null;
  return ExtendedBlockProto.newBuilder().
      setPoolId(b.getBlockPoolId()).
      setBlockId(b.getBlockId()).
      setNumBytes(b.getNumBytes()).
      setGenerationStamp(b.getGenerationStamp()).
      build();
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 10 | Source: PBHelperClient.java

Example 4: convertBlockECRecoveryInfo

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; // import the required package/class
public static BlockECRecoveryInfo convertBlockECRecoveryInfo(
    BlockECRecoveryInfoProto blockEcRecoveryInfoProto) {
  ExtendedBlockProto blockProto = blockEcRecoveryInfoProto.getBlock();
  ExtendedBlock block = PBHelperClient.convert(blockProto);

  DatanodeInfosProto sourceDnInfosProto = blockEcRecoveryInfoProto
      .getSourceDnInfos();
  DatanodeInfo[] sourceDnInfos = PBHelperClient.convert(sourceDnInfosProto);

  DatanodeInfosProto targetDnInfosProto = blockEcRecoveryInfoProto
      .getTargetDnInfos();
  DatanodeInfo[] targetDnInfos = PBHelperClient.convert(targetDnInfosProto);

  HdfsProtos.StorageUuidsProto targetStorageUuidsProto = blockEcRecoveryInfoProto
      .getTargetStorageUuids();
  String[] targetStorageUuids = convert(targetStorageUuidsProto);

  StorageTypesProto targetStorageTypesProto = blockEcRecoveryInfoProto
      .getTargetStorageTypes();
  StorageType[] convertStorageTypes = PBHelperClient.convertStorageTypes(
      targetStorageTypesProto.getStorageTypesList(), targetStorageTypesProto
          .getStorageTypesList().size());

  byte[] liveBlkIndices = blockEcRecoveryInfoProto.getLiveBlockIndices()
      .toByteArray();
  ErasureCodingPolicy ecPolicy =
      PBHelperClient.convertErasureCodingPolicy(
          blockEcRecoveryInfoProto.getEcPolicy());
  return new BlockECRecoveryInfo(block, sourceDnInfos, targetDnInfos,
      targetStorageUuids, convertStorageTypes, liveBlkIndices, ecPolicy);
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 32 | Source: PBHelper.java

Example 5: testConvertExtendedBlock

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; // import the required package/class
@Test
public void testConvertExtendedBlock() {
  ExtendedBlock b = getExtendedBlock();
  ExtendedBlockProto bProto = PBHelperClient.convert(b);
  ExtendedBlock b1 = PBHelperClient.convert(bProto);
  assertEquals(b, b1);
  
  b.setBlockId(-1);
  bProto = PBHelperClient.convert(b);
  b1 = PBHelperClient.convert(bProto);
  assertEquals(b, b1);
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 13 | Source: TestPBHelper.java

Example 6: getHdfsBlocksMetadata

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; // import the required package/class
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException {
  // Convert to proto objects
  List<ExtendedBlockProto> blocksProtos = 
      new ArrayList<ExtendedBlockProto>(blocks.size());
  List<TokenProto> tokensProtos = 
      new ArrayList<TokenProto>(tokens.size());
  for (ExtendedBlock b : blocks) {
    blocksProtos.add(PBHelper.convert(b));
  }
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request = 
      GetHdfsBlockLocationsRequestProto.newBuilder()
      .addAllBlocks(blocksProtos)
      .addAllTokens(tokensProtos)
      .build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<byte[]>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}), 
      volumeIds, volumeIndexes);
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 40 | Source: ClientDatanodeProtocolTranslatorPB.java
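
As a hedged illustration of where the arguments to getHdfsBlocksMetadata typically come from (this caller is not part of the original example): each LocatedBlock of a file exposes both its ExtendedBlock and its block access token, so a caller can collect them pairwise and pass them to the translator above. The sketch assumes the older ClientDatanodeProtocol signature used in Example 6; this API changed in later Hadoop releases.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class BlocksMetadataCallerSketch {
  // Collects each LocatedBlock's ExtendedBlock and access token and asks the
  // datanode for the corresponding volume metadata.
  static HdfsBlocksMetadata queryMetadata(ClientDatanodeProtocol datanode,
      List<LocatedBlock> locatedBlocks) throws IOException {
    List<ExtendedBlock> blocks = new ArrayList<>(locatedBlocks.size());
    List<Token<BlockTokenIdentifier>> tokens = new ArrayList<>(locatedBlocks.size());
    for (LocatedBlock lb : locatedBlocks) {
      blocks.add(lb.getBlock());       // the block itself
      tokens.add(lb.getBlockToken());  // the matching access token
    }
    return datanode.getHdfsBlocksMetadata(blocks, tokens);
  }
}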

Example 7: convert

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; // import the required package/class
public static ExtendedBlock convert(ExtendedBlockProto eb) {
  if (eb == null) {
    return null;
  }
  return new ExtendedBlock(eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(),
      eb.getGenerationStamp());
}
 
Developer: hopshadoop | Project: hops | Lines: 8 | Source: PBHelper.java

Example 8: getHdfsBlocksMetadata

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; // import the required package/class
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException {
  // Convert to proto objects
  List<ExtendedBlockProto> blocksProtos =
      new ArrayList<>(blocks.size());
  List<TokenProto> tokensProtos = new ArrayList<>(tokens.size());
  for (ExtendedBlock b : blocks) {
    blocksProtos.add(PBHelper.convert(b));
  }
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request =
      GetHdfsBlockLocationsRequestProto.newBuilder()
          .addAllBlocks(blocksProtos).addAllTokens(tokensProtos).build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[]{}),
      volumeIds, volumeIndexes);
}
 
Developer: hopshadoop | Project: hops | Lines: 37 | Source: ClientDatanodeProtocolTranslatorPB.java

Example 9: convert

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto; // import the required package/class
ExtendedBlockProto convert(ExtendedBlock b); 
Developer: apache | Project: hbase | Lines: 2 | Source: FanOutOneBlockAsyncDFSOutputHelper.java
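
Example 9 shows only a single interface method from HBase's FanOutOneBlockAsyncDFSOutputHelper. As far as I can tell, HBase declares such one-method interfaces so it can bind them reflectively to whichever helper class the running Hadoop version provides (PBHelperClient in newer releases, PBHelper in older ones). The sketch below is a simplified, assumed illustration of that adapter pattern, not the actual HBase implementation; everything beyond the interface method shown above is a guess at the general technique.

import java.lang.reflect.Method;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

public final class ExtendedBlockConverterFactory {

  // The same single-method shape as Example 9.
  interface Converter {
    ExtendedBlockProto convert(ExtendedBlock b);
  }

  // Bind the interface to whichever static helper class the classpath offers.
  static Converter create() throws ReflectiveOperationException {
    Class<?> helper;
    try {
      // Newer Hadoop: the conversion lives in PBHelperClient.
      helper = Class.forName("org.apache.hadoop.hdfs.protocolPB.PBHelperClient");
    } catch (ClassNotFoundException e) {
      // Older Hadoop: fall back to PBHelper.
      helper = Class.forName("org.apache.hadoop.hdfs.protocolPB.PBHelper");
    }
    Method m = helper.getMethod("convert", ExtendedBlock.class);
    return b -> {
      try {
        return (ExtendedBlockProto) m.invoke(null, b); // static method, so no receiver
      } catch (ReflectiveOperationException ex) {
        throw new RuntimeException(ex);
      }
    };
  }
}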


Note: The org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and use or redistribution must follow each project's license. Please do not republish without permission.