

Java DatanodeCommand.getAction Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.DatanodeCommand.getAction, collected from open-source projects. If you are wondering what DatanodeCommand.getAction does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.protocol.DatanodeCommand.


The following presents 15 code examples of the DatanodeCommand.getAction method, sorted by popularity by default.
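Before the full examples, a minimal sketch of the common pattern may help: getAction() returns an int action code, which callers compare against the DatanodeProtocol.DNA_* constants to decide how to handle the command. The handleCommand helper below is hypothetical and only illustrates the dispatch idiom used throughout the examples that follow.

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

// Hypothetical helper (not from any of the projects below): dispatch on
// the action code returned by DatanodeCommand.getAction().
void handleCommand(DatanodeCommand cmd) {
  if (cmd == null) {
    return; // nothing to do for this heartbeat response
  }
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    // the namenode asked this datanode to copy blocks elsewhere
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // the namenode asked this datanode to re-register
    break;
  default:
    // unhandled action code; real callers typically log a warning
  }
}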

Example 1: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: NNThroughputBenchmark.java

Example 2: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: NNThroughputBenchmark.java

Example 3: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 19, Source: NNThroughputBenchmark.java

Example 4: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
int replicateBlocks() throws IOException {
	// register datanode
	DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
			DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
	if (cmds != null) {
		for (DatanodeCommand cmd : cmds) {
			if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
				// Send a copy of a block to another datanode
				BlockCommand bcmd = (BlockCommand) cmd;
				return transferBlocks(bcmd.getBlocks(),
						bcmd.getTargets());
			}
		}
	}
	return 0;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 21, Source: NNThroughputBenchmark.java

Example 5: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 22, Source: NNThroughputBenchmark.java

Example 6: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Developer: yncxcw, Project: FlexMap, Lines: 23, Source: NNThroughputBenchmark.java

Example 7: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
// keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep =
      {new StorageReport(dnRegistration.getStorageID(), false, DF_CAPACITY,
          DF_USED, DF_CAPACITY - DF_USED, DF_USED)};
  DatanodeCommand[] cmds =
      nameNodeProto.sendHeartbeat(dnRegistration, rep, 0, 0, 0)
          .getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: hopshadoop, Project: hops, Lines: 25, Source: NNThroughputBenchmark.java

Example 8: isValidStandbyCommand

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
private static boolean isValidStandbyCommand(DatanodeCommand cmd) {
  for (int validCommand : validStandbyCommands) {
    if (cmd.getAction() == validCommand) {
      return true;
    }
  }
  return false;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 9, Source: OfferService.java
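The validStandbyCommands field referenced above is not shown in this excerpt. A plausible declaration, for illustration only (the actual contents in hadoop-EAR may differ), would whitelist the handful of actions a standby namenode may issue:

// Hypothetical field, not part of the excerpt above: the action codes
// that a standby namenode is permitted to send to a datanode.
private static final int[] validStandbyCommands = {
    DatanodeProtocol.DNA_REGISTER, DatanodeProtocol.DNA_ACCESSKEYUPDATE };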

Example 9: processCommand

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
/**
 * Process an array of datanode commands. This function has logic to check for
 * failover. Any commands should be processed using this function as an
 * entry point.
 * 
 * @param cmds an array of datanode commands
 * @return true if further processing may be required or false otherwise.
 */
private boolean processCommand(DatanodeCommand[] cmds, long processStartTime)
    throws InterruptedException {
  if (cmds != null) {
    // at each heartbeat the standby offer service will talk to ZK!
    boolean switchedFromStandbyToPrimary = checkFailover();
    for (DatanodeCommand cmd : cmds) {
      try {
        // The datanode has received a register command after the failover, this
        // means that the offerservice thread for the datanode was down for a
        // while and it most probably did not clean up its deletion queue, hence
        // force a cleanup.
        if (switchedFromStandbyToPrimary
            && cmd.getAction() == DatanodeProtocol.DNA_REGISTER) {
          this.clearPrimary();
        }
        
        // The standby service thread is allowed to process only a small set
        // of valid commands.
        if (!isPrimaryServiceCached() && !isValidStandbyCommand(cmd)) {
          LOG.warn("Received an invalid command " + cmd.getAction()
              + " from standby " + this.namenodeAddress);
          continue;
        } 
        if (processCommand(cmd, processStartTime) == false) {
          return false;
        }
      } catch (IOException ioe) {
        LOG.warn("Error processing datanode Command", ioe);
      }
    }
  }
  return true;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 42, Source: OfferService.java

Example 10: processCommandFromStandby

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
private boolean processCommandFromStandby(DatanodeCommand cmd,
    BPServiceActor actor) throws IOException {
  if (cmd == null)
    return true;
  switch(cmd.getAction()) {
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action from standby: DNA_REGISTER");
    actor.reRegister();
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    LOG.info("DatanodeCommand action from standby: DNA_ACCESSKEYUPDATE");
    if (dn.isBlockTokenEnabled) {
      dn.blockPoolTokenSecretManager.addKeys(
          getBlockPoolId(), 
          ((KeyUpdateCommand) cmd).getExportedKeys());
    }
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
  case DatanodeProtocol.DNA_FINALIZE:
  case DatanodeProtocol.DNA_RECOVERBLOCK:
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    LOG.warn("Got a command from standby NN - ignoring command:" + cmd.getAction());
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  return true;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 32, Source: BPOfferService.java

Example 11: convert

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
 
Developer: naver, Project: hadoop, Lines: 48, Source: PBHelper.java

Example 12: convert

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ERASURE_CODING_RECOVERY:
    builder.setCmdType(DatanodeCommandProto.Type.BlockECRecoveryCommand)
        .setBlkECRecoveryCmd(
            convert((BlockECRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 53, Source: PBHelper.java

Example 13: processCommand

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
/**
 * Process a single command received from the name-node.
 *
 * @param cmd the command to process
 * @return true if further processing may be required or false otherwise.
 * @throws IOException
 */
 private boolean processCommand(DatanodeCommand cmd, long processStartTime)
     throws IOException {
  if (cmd == null)
    return true;
  final BlockCommand bcmd = cmd instanceof BlockCommand? (BlockCommand)cmd: null;

  boolean retValue = true;
  long startTime = System.currentTimeMillis();

  switch(cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    // Send a copy of a block to another datanode
    transferBlocks(namespaceId,
        bcmd.getBlocks(), bcmd.getTargets());
    myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    //
    // Some local block(s) are obsolete and can be 
    // safely garbage-collected.
    //
    Block toDelete[] = bcmd.getBlocks();
    try {
      if (blockScanner != null) {
        blockScanner.deleteBlocks(namespaceId, toDelete);
      }        
      data.invalidate(namespaceId, toDelete);
    } catch(IOException e) {
      checkDiskError();
      throw e;
    }
    myMetrics.blocksRemoved.inc(toDelete.length);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    // shut down the data node
    shouldServiceRun = false;
    retValue = false;
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action: DNA_REGISTER");
    if (shouldRun) {
      register();
      firstBlockReportSent = false;
    }
    break;
  case DatanodeProtocol.DNA_FINALIZE:
     storage.finalizedUpgrade(namespaceId);
    break;
  case UpgradeCommand.UC_ACTION_START_UPGRADE:
    // start distributed upgrade here
    processDistributedUpgradeCommand((UpgradeCommand)cmd);
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
     recoverBlocks(namespaceId, bcmd.getBlocks(), bcmd.getTargets(),
         processStartTime);
    break;
  case DatanodeProtocol.DNA_RAIDTASK:
    processRaidTaskCommand((RaidTaskCommand) cmd);
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  long endTime = System.currentTimeMillis();
  if (endTime - startTime > 1000) {
    LOG.info("processCommand() took " + (endTime - startTime)
        + " msec to process command " + cmd.getAction() + " from " + nnAddr);
  } else if (LOG.isDebugEnabled()) {
    LOG.debug("processCommand() took " + (endTime - startTime)
        + " msec to process command " + cmd.getAction() + " from " + nnAddr);
  }
  return retValue;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 80, Source: DataNode.java

Example 14: convert

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
        PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 43, Source: PBHelper.java

Example 15: convert

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
    case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
      builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
          .setBalancerCmd(
              PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
      builder.setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
          .setKeyUpdateCmd(
              PBHelper.convert((KeyUpdateCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_RECOVERBLOCK:
      builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
          .setRecoveryCmd(
              PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_FINALIZE:
      builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
          .setFinalizeCmd(
              PBHelper.convert((FinalizeCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_REGISTER:
      builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
          .setRegisterCmd(REG_CMD_PROTO);
      break;
    case DatanodeProtocol.DNA_TRANSFER:
    case DatanodeProtocol.DNA_INVALIDATE:
    case DatanodeProtocol.DNA_SHUTDOWN:
      builder.setCmdType(DatanodeCommandProto.Type.BlockCommand)
          .setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
      break;
    case DatanodeProtocol.DNA_UNKNOWN: //Not expected
    default:
      builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
 
Developer: hopshadoop, Project: hops, Lines: 44, Source: PBHelper.java


Note: The org.apache.hadoop.hdfs.server.protocol.DatanodeCommand.getAction examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects, and copyright in the source code remains with the original authors; consult each project's license before redistributing or using the code. Do not reproduce this compilation without permission.