This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.DatanodeCommand.getAction. If you are wondering what DatanodeCommand.getAction does and how to use it in practice, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.protocol.DatanodeCommand.
The following presents 15 code examples of the DatanodeCommand.getAction method, sorted by popularity by default.
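
Before diving in, here is a minimal sketch of the pattern all of these examples share: compare the int action code returned by cmd.getAction() against the DatanodeProtocol.DNA_* constants and dispatch accordingly. This sketch is illustrative only and is not one of the 15 collected examples; the dispatch(...) and handleTransfer(...) names are placeholders, not part of the Hadoop API.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class GetActionDispatchSketch {
  /**
   * Dispatch on the action code of a command received from the name-node.
   * Returns false when the command asks the service loop to stop.
   */
  boolean dispatch(DatanodeCommand cmd) throws IOException {
    if (cmd == null) {
      return true; // nothing to do
    }
    switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      // Replicate blocks to the targets carried by the BlockCommand.
      handleTransfer((BlockCommand) cmd); // placeholder handler
      return true;
    case DatanodeProtocol.DNA_SHUTDOWN:
      return false; // stop the service loop
    default:
      // Unknown or unhandled action code; real code logs and continues.
      return true;
    }
  }

  private void handleTransfer(BlockCommand bcmd) {
    // placeholder: a real datanode would call transferBlocks(...) here
  }
}
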
Example 1: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
            bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Example 2: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
            bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Example 3: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Example 4: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(),
            bcmd.getTargets());
      }
    }
  }
  return 0;
}
Example 5: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Example 6: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
            bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Example 7: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
// keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep =
      {new StorageReport(dnRegistration.getStorageID(), false, DF_CAPACITY,
          DF_USED, DF_CAPACITY - DF_USED, DF_USED)};
  DatanodeCommand[] cmds =
      nameNodeProto.sendHeartbeat(dnRegistration, rep, 0, 0, 0)
          .getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Example 8: isValidStandbyCommand

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

private static boolean isValidStandbyCommand(DatanodeCommand cmd) {
  for (int validCommand : validStandbyCommands) {
    if (cmd.getAction() == validCommand) {
      return true;
    }
  }
  return false;
}
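
The validStandbyCommands array iterated above is not shown in the example. A hypothetical definition, assuming the standby name-node may only ask for re-registration and access-key updates, might look like the following; the actual whitelist in the source class may list different action codes.

// Hypothetical whitelist; the real field may contain other DNA_* codes.
private static final int[] validStandbyCommands = {
    DatanodeProtocol.DNA_REGISTER,
    DatanodeProtocol.DNA_ACCESSKEYUPDATE
};
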
Example 9: processCommand

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

/**
 * Process an array of datanode commands. This function has logic to check for
 * failover. Any commands should be processed using this function as an
 * entry point.
 *
 * @param cmds an array of datanode commands
 * @return true if further processing may be required or false otherwise.
 */
private boolean processCommand(DatanodeCommand[] cmds, long processStartTime)
    throws InterruptedException {
  if (cmds != null) {
    // at each heartbeat the standby offer service will talk to ZK!
    boolean switchedFromStandbyToPrimary = checkFailover();
    for (DatanodeCommand cmd : cmds) {
      try {
        // If the datanode received a register command after the failover,
        // the offerservice thread for the datanode was down for a while and
        // most probably did not clean up its deletion queue, so force a
        // cleanup.
        if (switchedFromStandbyToPrimary
            && cmd.getAction() == DatanodeProtocol.DNA_REGISTER) {
          this.clearPrimary();
        }
        // The standby service thread is allowed to process only a small set
        // of valid commands.
        if (!isPrimaryServiceCached() && !isValidStandbyCommand(cmd)) {
          LOG.warn("Received an invalid command " + cmd.getAction()
              + " from standby " + this.namenodeAddress);
          continue;
        }
        if (processCommand(cmd, processStartTime) == false) {
          return false;
        }
      } catch (IOException ioe) {
        LOG.warn("Error processing datanode Command", ioe);
      }
    }
  }
  return true;
}
Example 10: processCommandFromStandby

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

private boolean processCommandFromStandby(DatanodeCommand cmd,
    BPServiceActor actor) throws IOException {
  if (cmd == null)
    return true;
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action from standby: DNA_REGISTER");
    actor.reRegister();
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    LOG.info("DatanodeCommand action from standby: DNA_ACCESSKEYUPDATE");
    if (dn.isBlockTokenEnabled) {
      dn.blockPoolTokenSecretManager.addKeys(
          getBlockPoolId(),
          ((KeyUpdateCommand) cmd).getExportedKeys());
    }
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
  case DatanodeProtocol.DNA_FINALIZE:
  case DatanodeProtocol.DNA_RECOVERBLOCK:
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    LOG.warn("Got a command from standby NN - ignoring command: "
        + cmd.getAction());
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  return true;
}
Example 11: convert

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand)
        .setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand)
        .setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: // not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
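
A brief usage sketch for the converter above, assuming it lives in PBHelper as in upstream Hadoop. FinalizeCommand takes a block pool id string; the pool id below is made up for illustration.

// A FinalizeCommand should map to the FinalizeCommand proto type.
DatanodeCommand cmd = new FinalizeCommand("BP-0000000000-127.0.0.1-0"); // made-up pool id
DatanodeCommandProto proto = PBHelper.convert(cmd);
assert proto.getCmdType() == DatanodeCommandProto.Type.FinalizeCommand;
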
Example 12: convert

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand)
        .setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand)
        .setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ERASURE_CODING_RECOVERY:
    builder.setCmdType(DatanodeCommandProto.Type.BlockECRecoveryCommand)
        .setBlkECRecoveryCmd(
            convert((BlockECRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: // not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Example 13: processCommand

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

/**
 * Process a single datanode command received from the name-node.
 *
 * @param cmd the command to process
 * @return true if further processing may be required or false otherwise.
 * @throws IOException
 */
private boolean processCommand(DatanodeCommand cmd, long processStartTime)
    throws IOException {
  if (cmd == null)
    return true;
  final BlockCommand bcmd = cmd instanceof BlockCommand ? (BlockCommand) cmd : null;
  boolean retValue = true;
  long startTime = System.currentTimeMillis();
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    // Send a copy of a block to another datanode
    transferBlocks(namespaceId,
        bcmd.getBlocks(), bcmd.getTargets());
    myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    //
    // Some local block(s) are obsolete and can be
    // safely garbage-collected.
    //
    Block toDelete[] = bcmd.getBlocks();
    try {
      if (blockScanner != null) {
        blockScanner.deleteBlocks(namespaceId, toDelete);
      }
      data.invalidate(namespaceId, toDelete);
    } catch (IOException e) {
      checkDiskError();
      throw e;
    }
    myMetrics.blocksRemoved.inc(toDelete.length);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    // shut down the data node
    shouldServiceRun = false;
    retValue = false;
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action: DNA_REGISTER");
    if (shouldRun) {
      register();
      firstBlockReportSent = false;
    }
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    storage.finalizedUpgrade(namespaceId);
    break;
  case UpgradeCommand.UC_ACTION_START_UPGRADE:
    // start distributed upgrade here
    processDistributedUpgradeCommand((UpgradeCommand) cmd);
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    recoverBlocks(namespaceId, bcmd.getBlocks(), bcmd.getTargets(),
        processStartTime);
    break;
  case DatanodeProtocol.DNA_RAIDTASK:
    processRaidTaskCommand((RaidTaskCommand) cmd);
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  long endTime = System.currentTimeMillis();
  if (endTime - startTime > 1000) {
    LOG.info("processCommand() took " + (endTime - startTime)
        + " msec to process command " + cmd.getAction() + " from " + nnAddr);
  } else if (LOG.isDebugEnabled()) {
    LOG.debug("processCommand() took " + (endTime - startTime)
        + " msec to process command " + cmd.getAction() + " from " + nnAddr);
  }
  return retValue;
}
Example 14: convert

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand)
        .setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: // not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
Example 15: convert

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; // import the package/class this method depends on

public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(
            PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(
            PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand)
        .setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: // not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}