

Java BlockCommand.getBlocks Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.BlockCommand.getBlocks. If you have been wondering what BlockCommand.getBlocks does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.protocol.BlockCommand.


The sections below present 12 code examples of the BlockCommand.getBlocks method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
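As a quick orientation before the examples, here is a minimal, self-contained sketch of the method in use. It builds a transfer command with the four-argument BlockCommand constructor that appears in Examples 7 and 8 below (newer Hadoop versions also take storage types and storage IDs, as in Example 5), then reads the blocks back with getBlocks. The class name, block IDs, and the "bp1" block-pool ID are illustrative placeholders, not code from any of the listed projects.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class GetBlocksSketch {
  public static void main(String[] args) {
    // Placeholder blocks and (empty) per-block target lists.
    Block[] blocks = { new Block(21), new Block(22) };
    DatanodeInfo[][] targets = { new DatanodeInfo[0], new DatanodeInfo[0] };

    // Constructor signature as used in Examples 7 and 8 below.
    BlockCommand cmd = new BlockCommand(
        DatanodeProtocol.DNA_TRANSFER, "bp1", blocks, targets);

    // getBlocks() returns the Block[] that the command applies to.
    for (Block b : cmd.getBlocks()) {
      System.out.println("block id: " + b.getBlockId());
    }
  }
}

Examples 1 through 8 cover converting a BlockCommand to and from its protobuf form (including round-trip tests); Examples 9 through 12 show the consumer side, where a DataNode's processCommand dispatches on the command action and calls getBlocks() to drive block transfer, invalidation, and recovery.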

Example 1: convert

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
  return builder.build();
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 24, Source file: PBHelper.java

Example 2: convert

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder =
      BlockCommandProto.newBuilder().setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      builder.setAction(BlockCommandProto.Action.TRANSFER);
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      builder.setAction(BlockCommandProto.Action.INVALIDATE);
      break;
    case DatanodeProtocol.DNA_SHUTDOWN:
      builder.setAction(BlockCommandProto.Action.SHUTDOWN);
      break;
    default:
      throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (Block block : blocks) {
    builder.addBlocks(PBHelper.convert(block));
  }
  builder.addAllTargets(PBHelper.convert(cmd.getTargets()));
  return builder.build();
}
 
Developer ID: hopshadoop, Project: hops, Lines: 24, Source file: PBHelper.java

Example 3: convert

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(convert(cmd.getTargets()))
         .addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
  return builder.build();
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines: 25, Source file: PBHelper.java

Example 4: convert

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(convert(cmd.getTargets()))
         .addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
  StorageType[][] types = cmd.getTargetStorageTypes();
  if (types != null) {
    builder.addAllTargetStorageTypes(convert(types));
  }
  return builder.build();
}
 
Developer ID: naver, Project: hadoop, Lines: 29, Source file: PBHelper.java
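Compared with Examples 1 and 2, this version (like Example 3) also serializes the per-target storage UUIDs, and additionally the target storage types when the command carries them; Example 6 does the same via PBHelperClient in a later refactoring.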

Example 5: testConvertBlockCommand

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
@Test
public void testConvertBlockCommand() {
  Block[] blocks = new Block[] { new Block(21), new Block(22) };
  DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
      new DatanodeInfo[2] };
  dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
  String[][] storageIDs = {{"s00"}, {"s10", "s11"}};
  StorageType[][] storageTypes = {{StorageType.DEFAULT},
      {StorageType.DEFAULT, StorageType.DEFAULT}};
  BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
      blocks, dnInfos, storageTypes, storageIDs);
  BlockCommandProto bcProto = PBHelper.convert(bc);
  BlockCommand bc2 = PBHelper.convert(bcProto);
  assertEquals(bc.getAction(), bc2.getAction());
  assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
  Block[] blocks2 = bc2.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    assertEquals(blocks[i], blocks2[i]);
  }
  DatanodeInfo[][] dnInfos2 = bc2.getTargets();
  assertEquals(dnInfos.length, dnInfos2.length);
  for (int i = 0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1 = dnInfos[i];
    DatanodeInfo[] d2 = dnInfos2[i];
    assertEquals(d1.length, d2.length);
    for (int j = 0; j < d1.length; j++) {
      compare(d1[j], d2[j]);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 33, Source file: TestPBHelper.java

Example 6: convert

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelperClient.convert(blocks[i]));
  }
  builder.addAllTargets(PBHelperClient.convert(cmd.getTargets()))
         .addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
  StorageType[][] types = cmd.getTargetStorageTypes();
  if (types != null) {
    builder.addAllTargetStorageTypes(PBHelperClient.convert(types));
  }
  return builder.build();
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source file: PBHelper.java

Example 7: testConvertBlockCommand

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
@Test
public void testConvertBlockCommand() {
  Block[] blocks = new Block[] { new Block(21), new Block(22) };
  DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
      new DatanodeInfo[2] };
  dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
  BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
      blocks, dnInfos);
  BlockCommandProto bcProto = PBHelper.convert(bc);
  BlockCommand bc2 = PBHelper.convert(bcProto);
  assertEquals(bc.getAction(), bc2.getAction());
  assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
  Block[] blocks2 = bc2.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    assertEquals(blocks[i], blocks2[i]);
  }
  DatanodeInfo[][] dnInfos2 = bc2.getTargets();
  assertEquals(dnInfos.length, dnInfos2.length);
  for (int i = 0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1 = dnInfos[i];
    DatanodeInfo[] d2 = dnInfos2[i];
    assertEquals(d1.length, d2.length);
    for (int j = 0; j < d1.length; j++) {
      compare(d1[j], d2[j]);
    }
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 30, Source file: TestPBHelper.java

Example 8: testConvertBlockCommand

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
@Test
public void testConvertBlockCommand() {
  Block[] blocks = new Block[]{new Block(21), new Block(22)};
  DatanodeInfo[][] dnInfos =
      new DatanodeInfo[][]{new DatanodeInfo[1], new DatanodeInfo[2]};
  dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
  BlockCommand bc =
      new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1", blocks, dnInfos);
  BlockCommandProto bcProto = PBHelper.convert(bc);
  BlockCommand bc2 = PBHelper.convert(bcProto);
  assertEquals(bc.getAction(), bc2.getAction());
  assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
  Block[] blocks2 = bc2.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    assertEquals(blocks[i], blocks2[i]);
  }
  DatanodeInfo[][] dnInfos2 = bc2.getTargets();
  assertEquals(dnInfos.length, dnInfos2.length);
  for (int i = 0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1 = dnInfos[i];
    DatanodeInfo[] d2 = dnInfos2[i];
    assertEquals(d1.length, d2.length);
    for (int j = 0; j < d1.length; j++) {
      compare(d1[j], d2[j]);
    }
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines: 30, Source file: TestPBHelper.java

Example 9: processCommand

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
/**
 * Process a command received from the namenode.
 *
 * @param cmd the command sent by the namenode
 * @param processStartTime the time at which command processing began
 * @return true if further processing may be required or false otherwise.
 * @throws IOException
 */
private boolean processCommand(DatanodeCommand cmd, long processStartTime)
    throws IOException {
  if (cmd == null)
    return true;
  final BlockCommand bcmd = cmd instanceof BlockCommand ? (BlockCommand) cmd : null;

  boolean retValue = true;
  long startTime = System.currentTimeMillis();

  switch(cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    // Send a copy of a block to another datanode
    transferBlocks(namespaceId,
        bcmd.getBlocks(), bcmd.getTargets());
    myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    //
    // Some local block(s) are obsolete and can be 
    // safely garbage-collected.
    //
    Block[] toDelete = bcmd.getBlocks();
    try {
      if (blockScanner != null) {
        blockScanner.deleteBlocks(namespaceId, toDelete);
      }        
      data.invalidate(namespaceId, toDelete);
    } catch(IOException e) {
      checkDiskError();
      throw e;
    }
    myMetrics.blocksRemoved.inc(toDelete.length);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    // shut down the data node
    shouldServiceRun = false;
    retValue = false;
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action: DNA_REGISTER");
    if (shouldRun) {
      register();
      firstBlockReportSent = false;
    }
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    storage.finalizedUpgrade(namespaceId);
    break;
  case UpgradeCommand.UC_ACTION_START_UPGRADE:
    // start distributed upgrade here
    processDistributedUpgradeCommand((UpgradeCommand)cmd);
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    recoverBlocks(namespaceId, bcmd.getBlocks(), bcmd.getTargets(),
        processStartTime);
    break;
  case DatanodeProtocol.DNA_RAIDTASK:
    processRaidTaskCommand((RaidTaskCommand) cmd);
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  long endTime = System.currentTimeMillis();
  if (endTime - startTime > 1000) {
    LOG.info("processCommand() took " + (endTime - startTime)
        + " msec to process command " + cmd.getAction() + " from " + nnAddr);
  } else if (LOG.isDebugEnabled()) {
    LOG.debug("processCommand() took " + (endTime - startTime)
        + " msec to process command " + cmd.getAction() + " from " + nnAddr);
  }
  return retValue;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines: 80, Source file: DataNode.java

Example 10: processCommand

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
/**
 * Process a command received from the namenode.
 *
 * @param cmd the command sent by the namenode
 * @return true if further processing may be required or false otherwise.
 * @throws IOException
 */
private boolean processCommand(DatanodeCommand cmd) throws IOException {
  if (cmd == null)
    return true;
  final BlockCommand bcmd = cmd instanceof BlockCommand ? (BlockCommand) cmd : null;

  switch(cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    // Send a copy of a block to another datanode
    transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
    myMetrics.incrBlocksReplicated(bcmd.getBlocks().length);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    //
    // Some local block(s) are obsolete and can be 
    // safely garbage-collected.
    //
    Block[] toDelete = bcmd.getBlocks();
    try {
      if (blockScanner != null) {
        blockScanner.deleteBlocks(toDelete);
      }
      data.invalidate(toDelete);
    } catch(IOException e) {
      checkDiskError();
      throw e;
    }
    myMetrics.incrBlocksRemoved(toDelete.length);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    // shut down the data node
    this.shutdown();
    return false;
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action: DNA_REGISTER");
    if (shouldRun) {
      register();
    }
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    storage.finalizeUpgrade();
    break;
  case UpgradeCommand.UC_ACTION_START_UPGRADE:
    // start distributed upgrade here
    processDistributedUpgradeCommand((UpgradeCommand)cmd);
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    recoverBlocks(bcmd.getBlocks(), bcmd.getTargets());
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    LOG.info("DatanodeCommand action: DNA_ACCESSKEYUPDATE");
    if (isBlockTokenEnabled) {
      blockTokenSecretManager.setKeys(((KeyUpdateCommand) cmd).getExportedKeys());
    }
    break;
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    LOG.info("DatanodeCommand action: DNA_BALANCERBANDWIDTHUPDATE");
    int vsn = ((BalancerBandwidthCommand) cmd).getBalancerBandwidthVersion();
    if (vsn >= 1) {
      long bandwidth = 
                 ((BalancerBandwidthCommand) cmd).getBalancerBandwidthValue();
      if (bandwidth > 0) {
        DataXceiverServer dxcs =
                     (DataXceiverServer) this.dataXceiverServer.getRunnable();
        dxcs.balanceThrottler.setBandwidth(bandwidth);
      }
    }
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  return true;
}
 
Developer ID: Seagate, Project: hadoop-on-lustre, Lines: 80, Source file: DataNode.java

Example 11: processCommand

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
/**
 * Process a command received from the namenode.
 *
 * @param cmd the command sent by the namenode
 * @return true if further processing may be required or false otherwise.
 * @throws IOException
 */
private boolean processCommand(DatanodeCommand cmd) throws IOException {
  if (cmd == null)
    return true;
  final BlockCommand bcmd = cmd instanceof BlockCommand ? (BlockCommand) cmd : null;

  boolean retValue = true;
  long startTime = System.currentTimeMillis();

  switch(cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    // Send a copy of a block to another datanode
    transferBlocks(namespaceId,
        bcmd.getBlocks(), bcmd.getTargets());
    myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    //
    // Some local block(s) are obsolete and can be 
    // safely garbage-collected.
    //
    Block[] toDelete = bcmd.getBlocks();
    try {
      if (blockScanner != null) {
        blockScanner.deleteBlocks(namespaceId, toDelete);
      }        
      data.invalidate(namespaceId, toDelete);
    } catch(IOException e) {
      checkDiskError();
      throw e;
    }
    myMetrics.blocksRemoved.inc(toDelete.length);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    // shut down the data node
    shouldServiceRun = false;
    retValue = false;
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action: DNA_REGISTER");
    if (shouldRun) {
      register();
      firstBlockReportSent = false;
    }
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    storage.finalizedUpgrade(namespaceId);
    break;
  case UpgradeCommand.UC_ACTION_START_UPGRADE:
    // start distributed upgrade here
    processDistributedUpgradeCommand((UpgradeCommand)cmd);
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    recoverBlocks(namespaceId, bcmd.getBlocks(), bcmd.getTargets());
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  long endTime = System.currentTimeMillis();
  if (endTime - startTime > 1000) {
    LOG.info("processCommand() took " + (endTime - startTime)
        + " msec to process command " + cmd.getAction() + " from " + nnAddr);
  } else if (LOG.isDebugEnabled()) {
    LOG.debug("processCommand() took " + (endTime - startTime)
        + " msec to process command " + cmd.getAction() + " from " + nnAddr);
  }
  return retValue;
}
 
Developer ID: iVCE, Project: RDFS, Lines: 75, Source file: DataNode.java

Example 12: processCommand

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the package/class this method depends on
/**
 * Process a command received from the namenode.
 *
 * @param cmd the command sent by the namenode
 * @return true if further processing may be required or false otherwise.
 * @throws IOException
 */
private boolean processCommand(DatanodeCommand cmd) throws IOException {
  if (cmd == null)
    return true;
  final BlockCommand bcmd = cmd instanceof BlockCommand ? (BlockCommand) cmd : null;

  switch(cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    // Send a copy of a block to another datanode
    transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
    myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    //
    // Some local block(s) are obsolete and can be 
    // safely garbage-collected.
    //
    Block[] toDelete = bcmd.getBlocks();
    try {
      if (blockScanner != null) {
        blockScanner.deleteBlocks(toDelete);
      }
      data.invalidate(toDelete);
    } catch(IOException e) {
      checkDiskError();
      throw e;
    }
    myMetrics.blocksRemoved.inc(toDelete.length);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    // shut down the data node
    this.shutdown();
    return false;
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action: DNA_REGISTER");
    if (shouldRun) {
      register();
    }
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    storage.finalizeUpgrade();
    break;
  case UpgradeCommand.UC_ACTION_START_UPGRADE:
    // start distributed upgrade here
    processDistributedUpgradeCommand((UpgradeCommand)cmd);
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    recoverBlocks(bcmd.getBlocks(), bcmd.getTargets());
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  return true;
}
 
Developer ID: thisisvoa, Project: hadoop-0.20, Lines: 61, Source file: DataNode.java


Note: The org.apache.hadoop.hdfs.server.protocol.BlockCommand.getBlocks method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.