

Java FinalizeCommand Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.FinalizeCommand. If you are unsure what the FinalizeCommand class does, how to use it, or where it appears in real code, the selected examples below may help.


The FinalizeCommand class belongs to the org.apache.hadoop.hdfs.server.protocol package. A total of 11 code examples of the FinalizeCommand class are shown below, sorted by popularity by default.
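
For orientation before the examples: FinalizeCommand is the DatanodeCommand that the NameNode returns to tell a DataNode to finalize a pending upgrade of one block pool. Below is a minimal sketch of constructing the command and reading the pool id back; the FinalizeCommand(String) constructor also appears in the examples below, while the getBlockPoolId() accessor is taken from the Hadoop source and should be verified against your Hadoop version.

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;

public class FinalizeCommandSketch {
  public static void main(String[] args) {
    // Placeholder block pool id, for illustration only.
    String poolId = "BP-123456-127.0.0.1-1400000000000";
    // The command carries nothing but the block pool to finalize.
    FinalizeCommand cmd = new FinalizeCommand(poolId);
    System.out.println("Finalize block pool: " + cmd.getBlockPoolId());
  }
}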

Example 1: convert

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: PBHelper.java
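
Example 1 only shows how this helper dispatches on the command type; the FinalizeCommand case itself is a one-field mapping between the command object and its protobuf message. The following is a hedged sketch of what that pair of conversions typically looks like; the FinalizeCommandProto class and its builder methods are assumptions to verify against the DatanodeProtocolProtos generated for your Hadoop version.

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;

public class FinalizeCommandPbSketch {
  // Command object -> protobuf message (assumed builder API of the generated proto class).
  public static FinalizeCommandProto convert(FinalizeCommand cmd) {
    return FinalizeCommandProto.newBuilder()
        .setBlockPoolId(cmd.getBlockPoolId())
        .build();
  }

  // Protobuf message -> command object, the direction dispatched to in Example 1.
  public static FinalizeCommand convert(FinalizeCommandProto proto) {
    return new FinalizeCommand(proto.getBlockPoolId());
  }
}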

Example 2: convert

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  case BlockECRecoveryCommand:
    return PBHelper.convert(proto.getBlkECRecoveryCmd());
  default:
    return null;
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: PBHelper.java

Example 3: blockReport

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + " " + blist.getNumberOfBlocks()
         + " blocks");
  }

  namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
  if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState())
    return new FinalizeCommand(poolId);
  return null;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 17, Source: NameNodeRpcServer.java

Example 4: convert

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: PBHelper.java

Example 5: convert

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
    case BalancerBandwidthCommand:
      return PBHelper.convert(proto.getBalancerCmd());
    case BlockCommand:
      return PBHelper.convert(proto.getBlkCmd());
    case BlockRecoveryCommand:
      return PBHelper.convert(proto.getRecoveryCmd());
    case FinalizeCommand:
      return PBHelper.convert(proto.getFinalizeCmd());
    case KeyUpdateCommand:
      return PBHelper.convert(proto.getKeyUpdateCmd());
    case RegisterCommand:
      return REG_CMD;
  }
  return null;
}
 
Developer: hopshadoop, Project: hops, Lines: 18, Source: PBHelper.java

Example 6: blockReport

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager(); 
  boolean hasStaleStorages = true;
  for(StorageBlockReport r : reports) {
    final BlockListAsLongs blocks = new BlockListAsLongs(r.getBlocks());
    hasStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks);
  }

  if (nn.getFSImage().isUpgradeFinalized() &&
      !nn.isStandbyState() &&
      !hasStaleStorages) {
    return new FinalizeCommand(poolId);
  }

  return null;
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines: 24, Source: NameNodeRpcServer.java

Example 7: blockReport

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
      String poolId, StorageBlockReport[] reports,
      BlockReportContext context) throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager(); 
  boolean noStaleStorages = false;
  for (int r = 0; r < reports.length; r++) {
    final BlockListAsLongs blocks = reports[r].getBlocks();
    //
    // BlockManager.processReport accumulates information of prior calls
    // for the same node and storage, so the value returned by the last
    // call of this loop is the final updated value for noStaleStorage.
    //
    noStaleStorages = bm.processReport(nodeReg, reports[r].getStorage(),
        blocks, context, (r == reports.length - 1));
    metrics.incrStorageBlockReportOps();
  }

  if (nn.getFSImage().isUpgradeFinalized() &&
      !namesystem.isRollingUpgrade() &&
      !nn.isStandbyState() &&
      noStaleStorages) {
    return new FinalizeCommand(poolId);
  }

  return null;
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: NameNodeRpcServer.java

Example 8: blockReport

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
      String poolId, StorageBlockReport[] reports,
      BlockReportContext context) throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager(); 
  boolean noStaleStorages = false;
  for (int r = 0; r < reports.length; r++) {
    final BlockListAsLongs blocks = new BlockListAsLongs(reports[r].getBlocks());
    //
    // BlockManager.processReport accumulates information of prior calls
    // for the same node and storage, so the value returned by the last
    // call of this loop is the final updated value for noStaleStorage.
    //
    noStaleStorages = bm.processReport(nodeReg, reports[r].getStorage(),
        blocks, context, (r == reports.length - 1));
    metrics.incrStorageBlockReportOps();
  }

  if (nn.getFSImage().isUpgradeFinalized() &&
      !namesystem.isRollingUpgrade() &&
      !nn.isStandbyState() &&
      noStaleStorages) {
    return new FinalizeCommand(poolId);
  }

  return null;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 34, Source: NameNodeRpcServer.java

Example 9: blockReport

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager(); 
  boolean noStaleStorages = false;
  for(StorageBlockReport r : reports) {
    final BlockListAsLongs blocks = new BlockListAsLongs(r.getBlocks());
    //
    // BlockManager.processReport accumulates information of prior calls
    // for the same node and storage, so the value returned by the last
    // call of this loop is the final updated value for noStaleStorage.
    //
    noStaleStorages = bm.processReport(nodeReg, r.getStorage(), blocks);
    metrics.incrStorageBlockReportOps();
  }

  if (nn.getFSImage().isUpgradeFinalized() &&
      !namesystem.isRollingUpgrade() &&
      !nn.isStandbyState() &&
      noStaleStorages) {
    return new FinalizeCommand(poolId);
  }

  return null;
}
 
Developer: yncxcw, Project: FlexMap, Lines: 31, Source: NameNodeRpcServer.java

Example 10: blockReport

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  
  BlockReport blist = reports[0].getReport(); // Assume no federation '0'
  if (blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug(
        "*BLOCK* NameNode.blockReport: " + "from " + nodeReg + " " +
            blist.getNumBlocks() + " blocks");
  }

  namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
  return new FinalizeCommand(poolId);
}
 
Developer: hopshadoop, Project: hops, Lines: 16, Source: NameNodeRpcServer.java

Example 11: blockReport

import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand; // import the required package/class
@Override // DatanodeProtocol
public DatanodeCommand blockReport(final DatanodeRegistration nodeReg,
      String poolId, final StorageBlockReport[] reports,
      final BlockReportContext context) throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  if(blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
         + "from " + nodeReg + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager(); 
  boolean noStaleStorages = false;
  for (int r = 0; r < reports.length; r++) {
    final BlockListAsLongs blocks = reports[r].getBlocks();
    //
    // BlockManager.processReport accumulates information of prior calls
    // for the same node and storage, so the value returned by the last
    // call of this loop is the final updated value for noStaleStorage.
    //
    final int index = r;
    noStaleStorages = bm.runBlockOp(new Callable<Boolean>() {
      @Override
      public Boolean call() throws IOException {
        return bm.processReport(nodeReg, reports[index].getStorage(),
            blocks, context, (index == reports.length - 1));
      }
    });
    metrics.incrStorageBlockReportOps();
  }
  BlockManagerFaultInjector.getInstance().
      incomingBlockReportRpc(nodeReg, context);

  if (nn.getFSImage().isUpgradeFinalized() &&
      !namesystem.isRollingUpgrade() &&
      !nn.isStandbyState() &&
      noStaleStorages) {
    return new FinalizeCommand(poolId);
  }

  return null;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 42, Source: NameNodeRpcServer.java
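
The blockReport examples above cover only the NameNode side, which returns a FinalizeCommand once the upgrade is finalized and the storage reports are not stale. For context, here is a simplified, hedged sketch of how the receiving DataNode might dispatch such a command by its action code. The UpgradeHook interface and the finalizeUpgradeForPool name are illustrative assumptions rather than confirmed Hadoop APIs; DatanodeProtocol.DNA_FINALIZE, however, is the action code FinalizeCommand is constructed with.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;

public class CommandDispatchSketch {
  // Hypothetical callback standing in for the DataNode's upgrade-finalization logic.
  interface UpgradeHook {
    void finalizeUpgradeForPool(String blockPoolId) throws IOException;
  }

  static void processCommand(DatanodeCommand cmd, UpgradeHook hook) throws IOException {
    if (cmd == null) {
      return; // the NameNode returned no command for this report
    }
    switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_FINALIZE:
      // FinalizeCommand only names the block pool whose upgrade should be finalized.
      String bp = ((FinalizeCommand) cmd).getBlockPoolId();
      hook.finalizeUpgradeForPool(bp);
      break;
    default:
      // Other command types (block transfer, key update, ...) are handled elsewhere.
      break;
    }
  }
}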


Note: The org.apache.hadoop.hdfs.server.protocol.FinalizeCommand class examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use must follow the corresponding project's license. Do not reproduce without permission.