This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.BlockCommand. If you are wondering what exactly BlockCommand is for and how to use it, the curated examples below may help.
The BlockCommand class belongs to the org.apache.hadoop.hdfs.server.protocol package. A total of 15 code examples are shown below, ordered by popularity.
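Before the examples, a minimal, hedged sketch of where BlockCommand fits: the namenode piggybacks it on heartbeat responses, and the datanode dispatches on its action code. The class and the handle() helper below are hypothetical; the accessors and action constants are the ones the examples actually use.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class BlockCommandDispatchSketch {
  // Hypothetical helper: react to one command received from the namenode.
  static void handle(DatanodeCommand cmd) {
    switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER: {
      BlockCommand bcmd = (BlockCommand) cmd;
      Block[] blocks = bcmd.getBlocks();            // blocks to replicate
      DatanodeInfo[][] targets = bcmd.getTargets(); // targets[i] receives blocks[i]
      for (int i = 0; i < blocks.length; i++) {
        // ... start replication of blocks[i] to targets[i] ...
      }
      break;
    }
    case DatanodeProtocol.DNA_INVALIDATE: {
      BlockCommand bcmd = (BlockCommand) cmd;
      for (Block b : bcmd.getBlocks()) {
        // ... schedule local deletion of b ...
      }
      break;
    }
    default:
      // other DatanodeCommand subclasses (finalize, key update, ...) are
      // handled elsewhere
      break;
    }
  }
}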
Example 1: run
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;
  result = (trashDirectory == null) ? deleteFiles() : moveFiles();
  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
}
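One detail in the example above deserves a note: a block whose length is set to the BlockCommand.NO_ACK sentinel is deleted without notifying the namenode. A hypothetical illustration of producing such a block (the helper and its names are mine, not from the example):

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;

public class NoAckSketch {
  // Hypothetical helper: mark a block so that the deleting datanode skips
  // notifyNamenodeDeletedBlock (see the NO_ACK check in the example above).
  static Block blockWithoutAck(long blockId, long genStamp) {
    Block b = new Block(blockId, 0, genStamp);
    b.setNumBytes(BlockCommand.NO_ACK); // sentinel length, not a real size
    return b;
  }
}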
Example 2: convert
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
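convert() above maps the protobuf form back to Java command objects; PBHelper also offers the forward mapping, so a command can be round-tripped through its wire representation. A minimal sketch, assuming the 2.x-era PBHelper overloads implied by the dispatch above:

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;

public class BlockCommandPbRoundTrip {
  // Hypothetical round trip: Java command -> protobuf -> Java command.
  static BlockCommand roundTrip(BlockCommand cmd) {
    BlockCommandProto proto = PBHelper.convert(cmd); // serialize for the wire
    return PBHelper.convert(proto); // the inverse, as used by convert() above
  }
}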
Example 3: replicateBlocks
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // send heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
            bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
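transferBlocks is the benchmark's own helper and is not shown here; what matters for BlockCommand is the shape of its inputs. A hedged sketch of how the three parallel arrays from a DNA_TRANSFER command line up (the walk() helper is hypothetical):

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;

public class TransferTargetsSketch {
  // Hypothetical walk over a DNA_TRANSFER command: blocks[i] goes to each
  // targets[i][j], landing on storage storageIDs[i][j].
  static void walk(BlockCommand bcmd) {
    Block[] blocks = bcmd.getBlocks();
    DatanodeInfo[][] targets = bcmd.getTargets();
    String[][] storageIDs = bcmd.getTargetStorageIDs();
    for (int i = 0; i < blocks.length; i++) {
      for (int j = 0; j < targets[i].length; j++) {
        // ... copy blocks[i] to targets[i][j] on storage storageIDs[i][j] ...
      }
    }
  }
}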
Example 4: run
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
@Override
public void run() {
  final long blockLength = blockFile.length();
  final long metaLength = metaFile.length();
  boolean result;
  result = (trashDirectory == null) ? deleteFiles() : moveFiles();
  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.onBlockFileDeletion(block.getBlockPoolId(), blockLength);
    volume.onMetaFileDeletion(block.getBlockPoolId(), metaLength);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
}
Example 5: convert
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  case BlockECRecoveryCommand:
    return PBHelper.convert(proto.getBlkECRecoveryCmd());
  default:
    return null;
  }
}
Example 6: replicateBlocks
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // send heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
            bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
Example 7: run
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;
  result = (trashDirectory == null) ? deleteFiles() : moveFiles();
  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  IOUtils.cleanup(null, volumeRef);
}
Example 8: testGetInvalidateBlocks
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
 * Test that getInvalidateBlocks observes the max limit.
 */
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
  DatanodeDescriptor dd = new DatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(MAX_LIMIT, bc.getBlocks().length);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(REMAINING_BLOCKS, bc.getBlocks().length);
}
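The two calls in the test suggest the batching contract: each getInvalidateBlocks call hands out at most the limit and leaves the remainder queued. A hypothetical draining loop on the same API, assuming (as in this era's codebase) that the method returns null once the queue is empty:

import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;

public class InvalidateDrainSketch {
  // Hypothetical: pull invalidation batches of at most maxLimit blocks each
  // until the descriptor's queue is exhausted; returns the total drained.
  static int drain(DatanodeDescriptor dd, int maxLimit) {
    int total = 0;
    BlockCommand bc;
    while ((bc = dd.getInvalidateBlocks(maxLimit)) != null) {
      total += bc.getBlocks().length; // at most maxLimit per batch
    }
    return total;
  }
}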
Example 9: replicateBlocks
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
int replicateBlocks() throws IOException {
  // send heartbeat to the name-node
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Example 10: replicateBlocks
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
@SuppressWarnings("unused")
int replicateBlocks() throws IOException {
  // send heartbeat to the name-node
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(),
            bcmd.getTargets());
      }
    }
  }
  return 0;
}
Example 11: blockReportNew
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
public DatanodeCommand blockReportNew(DatanodeRegistration nodeReg, BlockReport rep) throws IOException {
  if (runInfo.shutdown || !runInfo.isRunning) {
    return null;
  }
  if (ignoreDatanodes()) {
    LOG.info("Standby fell behind. Telling " + nodeReg.toString() +
        " to back off");
    // Do not process block reports yet as the ingest thread is catching up
    return AvatarDatanodeCommand.BACKOFF;
  }
  if (currentAvatar == Avatar.STANDBY) {
    Collection<Block> failed = super.blockReportWithRetries(nodeReg, rep);
    // standby should send only DNA_RETRY
    BlockCommand bCmd = new BlockCommand(DatanodeProtocols.DNA_RETRY,
        failed.toArray(new Block[failed.size()]));
    return bCmd;
  } else {
    // only the primary can send DNA_FINALIZE
    return super.blockReport(nodeReg, rep);
  }
}
Example 12: run
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  if (!blockFile.delete() || (!metaFile.delete() && metaFile.exists())) {
    LOG.warn("Unexpected error trying to delete block "
        + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block);
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
}
Example 13: convert
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
Example 14: replicateBlocks
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // send heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
Example 15: run
import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;
  result = (trashDirectory == null) ? deleteFiles() : moveFiles();
  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if (block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK) {
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
}