

Java BlockRecoveryCommand Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand. If you are wondering what the BlockRecoveryCommand class is for, how to use it, or what real-world usage looks like, the hand-picked examples below should help.


The BlockRecoveryCommand class belongs to the org.apache.hadoop.hdfs.server.protocol package. Seven code examples of the class are shown below, sorted by popularity by default.
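
Before the examples, here is a minimal, hypothetical sketch of how a BlockRecoveryCommand might be constructed and inspected in isolation. The pool name, block ID, sizes, and generation stamps are made-up values for illustration; only constructors and accessors that also appear in the examples below (BlockRecoveryCommand, RecoveringBlock, getRecoveringBlocks) are relied on.

import java.util.Arrays;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

public class BlockRecoveryCommandSketch {
  public static void main(String[] args) {
    // Hypothetical block and replica locations; real values come from the NameNode.
    ExtendedBlock blk = new ExtendedBlock("example-pool", 1L, 1024L, 100L);
    DatanodeInfo[] locations = new DatanodeInfo[0];

    // A RecoveringBlock pairs a block with its replica locations and the new
    // generation stamp (here 101) to be applied once recovery completes.
    RecoveringBlock rb = new RecoveringBlock(blk, locations, 101L);

    // The command itself is just a container for the blocks to recover.
    BlockRecoveryCommand cmd = new BlockRecoveryCommand(Arrays.asList(rb));
    for (RecoveringBlock b : cmd.getRecoveringBlocks()) {
      System.out.println(b.getBlock() + " -> new generation stamp "
          + b.getNewGenerationStamp());
    }
  }
}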

Example 1: convert

import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: PBHelper.java

Example 2: testConvertBlockRecoveryCommand

import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; // import the required package/class
@Test
public void testConvertBlockRecoveryCommand() {
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };

  List<RecoveringBlock> blks = ImmutableList.of(
    new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
    new RecoveringBlock(getExtendedBlock(2), dnInfo, 3)
  );
  
  BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
  BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
  assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
  assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());
  
  BlockRecoveryCommand cmd2 = PBHelper.convert(proto);
  
  List<RecoveringBlock> cmd2Blks = Lists.newArrayList(
      cmd2.getRecoveringBlocks());
  assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
  assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
  assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
  assertEquals(cmd.toString(), cmd2.toString());
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestPBHelper.java

Example 3: convert

import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  case BlockECRecoveryCommand:
    return PBHelper.convert(proto.getBlkECRecoveryCmd());
  default:
    return null;
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: PBHelper.java

Example 4: convert

import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: PBHelper.java

Example 5: convert

import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
    case BalancerBandwidthCommand:
      return PBHelper.convert(proto.getBalancerCmd());
    case BlockCommand:
      return PBHelper.convert(proto.getBlkCmd());
    case BlockRecoveryCommand:
      return PBHelper.convert(proto.getRecoveryCmd());
    case FinalizeCommand:
      return PBHelper.convert(proto.getFinalizeCmd());
    case KeyUpdateCommand:
      return PBHelper.convert(proto.getKeyUpdateCmd());
    case RegisterCommand:
      return REG_CMD;
  }
  return null;
}
 
Developer: hopshadoop, Project: hops, Lines: 18, Source: PBHelper.java

Example 6: testConvertBlockRecoveryCommand

import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; // import the required package/class
@Test
public void testConvertBlockRecoveryCommand() {
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[]{di1, di2};

  List<RecoveringBlock> blks = ImmutableList
      .of(new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
          new RecoveringBlock(getExtendedBlock(2), dnInfo, 3));
  
  BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
  BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
  assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
  assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());
  
  BlockRecoveryCommand cmd2 = PBHelper.convert(proto);
  
  List<RecoveringBlock> cmd2Blks =
      Lists.newArrayList(cmd2.getRecoveringBlocks());
  assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
  assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
  assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
  assertEquals(cmd.toString(), cmd2.toString());
}
 
Developer: hopshadoop, Project: hops, Lines: 25, Source: TestPBHelper.java

Example 7: getLeaseRecoveryCommand

import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; // import the required package/class
BlockRecoveryCommand getLeaseRecoveryCommand(int maxTransfers) {
  List<BlockInfoUnderConstruction> blocks = recoverBlocks.poll(maxTransfers);
  if(blocks == null)
    return null;
  BlockRecoveryCommand brCommand = new BlockRecoveryCommand(blocks.size());
  for(BlockInfoUnderConstruction b : blocks) {
    brCommand.add(new RecoveringBlock(
        b, b.getExpectedLocations(), b.getBlockRecoveryId()));
  }
  return brCommand;
}
 
Developer: cumulusyebl, Project: cumulus, Lines: 12, Source: DatanodeDescriptor.java
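
The command built in Example 7 is what the NameNode hands to a DataNode so it can drive block recovery. As a rough, hypothetical illustration of the receiving side, the sketch below simply iterates the recovering blocks; the class name, method name, and logging are assumptions, and a real DataNode additionally contacts the listed replicas and runs the actual recovery protocol. Only accessors already used in the examples above are relied on.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

// Hypothetical consumer of a BlockRecoveryCommand (illustration only).
class RecoveryCommandHandler {
  void handle(BlockRecoveryCommand cmd) {
    for (RecoveringBlock rb : cmd.getRecoveringBlocks()) {
      // Each entry carries the block, the datanodes believed to hold replicas,
      // and the new generation stamp assigned for this recovery attempt.
      DatanodeInfo[] locs = rb.getLocations();
      System.out.println("Recover " + rb.getBlock()
          + " using " + locs.length + " datanode(s), new GS="
          + rb.getNewGenerationStamp());
    }
  }
}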


Note: the org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their original authors, who retain copyright; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.