

Java DatanodeInfosProto Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto. If you have been wondering what DatanodeInfosProto is for, how to use it, or where to find working examples, the curated class code examples below should help.


DatanodeInfosProto belongs to the org.apache.hadoop.hdfs.protocol.proto.HdfsProtos package. Seven code examples of the class are shown below, sorted by popularity by default.

Example 1: convert

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto; // import the required package/class
public static BlockCommand convert(BlockCommandProto blkCmd) {
  // Unpack the repeated BlockProto field into a Block array.
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  // Each DatanodeInfosProto holds the target datanodes for one block,
  // so the result is a two-dimensional array parallel to blocks.
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  // Map the protobuf action enum onto the DatanodeProtocol int constants.
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 26, Source: PBHelper.java
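
The converted command's targets array is parallel to its blocks array: targets[i] lists the datanodes that blocks[i] should be sent to. A minimal consumption sketch follows (getBlocks() and getTargets() are BlockCommand accessors; the loop body is hypothetical and assumes java.util.Arrays is imported):

BlockCommand cmd = PBHelper.convert(blkCmd);  // as in Example 1 above
Block[] blocks = cmd.getBlocks();
DatanodeInfo[][] targets = cmd.getTargets();
for (int i = 0; i < blocks.length; i++) {
  // targets[i] holds the destination datanodes for blocks[i]
  System.out.println(blocks[i] + " -> " + Arrays.toString(targets[i]));
}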

Example 2: convert

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto; // import the required package/class
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
    case TRANSFER:
      action = DatanodeProtocol.DNA_TRANSFER;
      break;
    case INVALIDATE:
      action = DatanodeProtocol.DNA_INVALIDATE;
      break;
    case SHUTDOWN:
      action = DatanodeProtocol.DNA_SHUTDOWN;
      break;
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets);
}
 
Developer: hopshadoop, Project: hops, Lines: 26, Source: PBHelper.java

Example 3: convert

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto; // import the required package/class
private static List<DatanodeInfosProto> convert(DatanodeInfo[][] targets) {
  DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length];
  for (int i = 0; i < targets.length; i++) {
    ret[i] = DatanodeInfosProto.newBuilder()
        .addAllDatanodes(PBHelper.convert(targets[i])).build();
  }
  return Arrays.asList(ret);
}
 
Developer: naver, Project: hadoop, Lines: 9, Source: PBHelper.java
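
Example 3 is the serializing counterpart of the targets loop in Example 1. Since the method is private, it is only called from inside PBHelper; a short usage sketch (addAllTargets follows the generated protobuf naming for the repeated targets field, and the builder variable is hypothetical):

// Inside PBHelper, attaching converted targets to a command under construction:
BlockCommandProto.Builder builder = BlockCommandProto.newBuilder();
builder.addAllTargets(convert(targets));  // List<DatanodeInfosProto> from Example 3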

Example 4: convert

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto; // import the required package/class
public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) {
  List<DatanodeInfoProto> proto = datanodeInfosProto.getDatanodesList();
  DatanodeInfo[] infos = new DatanodeInfo[proto.size()];
  for (int i = 0; i < infos.length; i++) {
    infos[i] = convert(proto.get(i));
  }
  return infos;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 9, Source: PBHelperClient.java

Example 5: convertToDnInfosProto

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto; // import the required package/class
private static DatanodeInfosProto convertToDnInfosProto(DatanodeInfo[] dnInfos) {
  DatanodeInfosProto.Builder builder = DatanodeInfosProto.newBuilder();
  for (DatanodeInfo datanodeInfo : dnInfos) {
    builder.addDatanodes(PBHelperClient.convert(datanodeInfo));
  }
  return builder.build();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 8, Source: PBHelper.java
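
Examples 4 and 5 are inverses of each other. A minimal round-trip sketch that combines them, using only the builder and conversion calls shown above (the helper name roundTrip is hypothetical):

// Hypothetical helper: pack a DatanodeInfo[] into a DatanodeInfosProto,
// then immediately unpack it again.
static DatanodeInfo[] roundTrip(DatanodeInfo[] dnInfos) {
  DatanodeInfosProto.Builder builder = DatanodeInfosProto.newBuilder();
  for (DatanodeInfo dn : dnInfos) {
    builder.addDatanodes(PBHelperClient.convert(dn));  // DatanodeInfo -> proto
  }
  return PBHelperClient.convert(builder.build());      // proto -> DatanodeInfo[]
}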

Example 6: convertBlockECRecoveryInfo

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto; // import the required package/class
public static BlockECRecoveryInfo convertBlockECRecoveryInfo(
    BlockECRecoveryInfoProto blockEcRecoveryInfoProto) {
  ExtendedBlockProto blockProto = blockEcRecoveryInfoProto.getBlock();
  ExtendedBlock block = PBHelperClient.convert(blockProto);

  // Datanodes holding the surviving replicas (the recovery sources).
  DatanodeInfosProto sourceDnInfosProto = blockEcRecoveryInfoProto
      .getSourceDnInfos();
  DatanodeInfo[] sourceDnInfos = PBHelperClient.convert(sourceDnInfosProto);

  // Datanodes that will receive the reconstructed blocks (the recovery targets).
  DatanodeInfosProto targetDnInfosProto = blockEcRecoveryInfoProto
      .getTargetDnInfos();
  DatanodeInfo[] targetDnInfos = PBHelperClient.convert(targetDnInfosProto);

  HdfsProtos.StorageUuidsProto targetStorageUuidsProto = blockEcRecoveryInfoProto
      .getTargetStorageUuids();
  String[] targetStorageUuids = convert(targetStorageUuidsProto);

  StorageTypesProto targetStorageTypesProto = blockEcRecoveryInfoProto
      .getTargetStorageTypes();
  StorageType[] convertStorageTypes = PBHelperClient.convertStorageTypes(
      targetStorageTypesProto.getStorageTypesList(), targetStorageTypesProto
          .getStorageTypesList().size());

  // Indices of the block group's internal blocks that are still live.
  byte[] liveBlkIndices = blockEcRecoveryInfoProto.getLiveBlockIndices()
      .toByteArray();
  ErasureCodingPolicy ecPolicy =
      PBHelperClient.convertErasureCodingPolicy(
          blockEcRecoveryInfoProto.getEcPolicy());
  return new BlockECRecoveryInfo(block, sourceDnInfos, targetDnInfos,
      targetStorageUuids, convertStorageTypes, liveBlkIndices, ecPolicy);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 32, Source: PBHelper.java

Example 7: convert

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto; // import the required package/class
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelperClient.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelperClient.convert(targetList.get(i));
  }

  StorageType[][] targetStorageTypes = new StorageType[targetList.size()][];
  List<StorageTypesProto> targetStorageTypesList = blkCmd.getTargetStorageTypesList();
  if (targetStorageTypesList.isEmpty()) { // missing storage types
    for(int i = 0; i < targetStorageTypes.length; i++) {
      targetStorageTypes[i] = new StorageType[targets[i].length];
      Arrays.fill(targetStorageTypes[i], StorageType.DEFAULT);
    }
  } else {
    for(int i = 0; i < targetStorageTypes.length; i++) {
      List<StorageTypeProto> p = targetStorageTypesList.get(i).getStorageTypesList();
      targetStorageTypes[i] = PBHelperClient.convertStorageTypes(p, targets[i].length);
    }
  }

  List<StorageUuidsProto> targetStorageUuidsList = blkCmd.getTargetStorageUuidsList();
  String[][] targetStorageIDs = new String[targetStorageUuidsList.size()][];
  for(int i = 0; i < targetStorageIDs.length; i++) {
    List<String> storageIDs = targetStorageUuidsList.get(i).getStorageUuidsList();
    targetStorageIDs[i] = storageIDs.toArray(new String[storageIDs.size()]);
  }

  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  default:
    throw new AssertionError("Unknown action type: " + blkCmd.getAction());
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets,
      targetStorageTypes, targetStorageIDs);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 51, Source: PBHelper.java
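
One detail worth noting in Example 7: when the sender (for instance an older NameNode) includes no targetStorageTypes, the converter fills every target slot with StorageType.DEFAULT. A minimal sketch of that path, assuming the generated builder methods implied by the getters above (setAction, setBlockPoolId, addBlocks, addTargets); all concrete values are hypothetical:

// A command with one block, one empty target group, and no targetStorageTypes.
BlockCommandProto proto = BlockCommandProto.newBuilder()
    .setAction(BlockCommandProto.Action.TRANSFER)
    .setBlockPoolId("BP-example")                          // hypothetical pool id
    .addBlocks(BlockProto.newBuilder().setBlockId(1L).setGenStamp(1L).build())
    .addTargets(DatanodeInfosProto.newBuilder().build())   // zero target datanodes
    .build();
BlockCommand cmd = PBHelper.convert(proto);
// Here targetStorageTypes[0] is a zero-length array (zero targets); with N
// target datanodes, each of the N slots would be StorageType.DEFAULT.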


Note: the org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto examples above were collected from open-source projects hosted on GitHub and similar platforms. Copyright of the source code remains with the original authors; consult each project's license before redistributing or reusing the snippets.