

Java BlockCommand Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.BlockCommand. If you are wondering what BlockCommand is for, how to use it, or want to see it in real code, the curated class examples below should help.


The BlockCommand class belongs to the org.apache.hadoop.hdfs.server.protocol package. Fifteen code examples of the class are shown below, sorted by popularity by default.
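
Before the examples, here is a minimal sketch of the pattern most of them share: the NameNode attaches BlockCommand objects to heartbeat responses, and the DataNode dispatches on the command's action code. This is not code from any of the projects below; handleTransfer and handleInvalidate are hypothetical stand-ins for the real DataNode logic, while the accessors (getAction, getBlocks, getTargets) are the ones used throughout the examples, though their exact shape varies across Hadoop versions.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

abstract class BlockCommandDispatcher {
  // hypothetical handlers standing in for the real DataNode logic
  abstract void handleTransfer(Block[] blocks, DatanodeInfo[][] targets);
  abstract void handleInvalidate(Block[] blocks);

  void dispatch(DatanodeCommand cmd) {
    if (!(cmd instanceof BlockCommand)) {
      return; // other command types are handled elsewhere
    }
    BlockCommand bcmd = (BlockCommand) cmd;
    switch (bcmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      // replicate the listed blocks to the listed target datanodes
      handleTransfer(bcmd.getBlocks(), bcmd.getTargets());
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      // delete the listed blocks locally
      handleInvalidate(bcmd.getBlocks());
      break;
    default:
      break;
    }
  }
}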

Example 1: run

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: FsDatasetAsyncDiskService.java

Example 2: convert

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: PBHelper.java
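
PBHelper converts in both directions: the overload above deserializes a DatanodeCommandProto, and a matching overload serializes a DatanodeCommand back into its protobuf form. The following is a hedged round-trip sketch, assuming the Hadoop 2.x class layout (proto classes under org.apache.hadoop.hdfs.protocol.proto and the three-argument BlockCommand constructor used for invalidation); details differ between the Hadoop versions quoted on this page.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

class BlockCommandRoundTrip {
  // Round-trip a BlockCommand through its protobuf representation (sketch).
  static DatanodeCommand roundTrip(String poolId, Block[] blocks) {
    BlockCommand cmd =
        new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, poolId, blocks);
    DatanodeCommandProto proto = PBHelper.convert(cmd); // command -> protobuf
    return PBHelper.convert(proto);                     // protobuf -> command
  }
}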

Example 3: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: NNThroughputBenchmark.java

Example 4: run

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
@Override
public void run() {
  final long blockLength = blockFile.length();
  final long metaLength = metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.onBlockFileDeletion(block.getBlockPoolId(), blockLength);
    volume.onMetaFileDeletion(block.getBlockPoolId(), metaLength);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: FsDatasetAsyncDiskService.java

Example 5: convert

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  case BlockECRecoveryCommand:
    return PBHelper.convert(proto.getBlkECRecoveryCmd());
  default:
    return null;
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: PBHelper.java

Example 6: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 23, Source: NNThroughputBenchmark.java

Example 7: run

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  IOUtils.cleanup(null, volumeRef);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 23, Source: FsDatasetAsyncDiskService.java

Example 8: testGetInvalidateBlocks

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
 * Test that getInvalidateBlocks observes the maxlimit.
 */
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
  
  DatanodeDescriptor dd = new DatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i=0; i<MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.getBlocks().length, MAX_LIMIT);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.getBlocks().length, REMAINING_BLOCKS);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 20, Source: TestDatanodeDescriptor.java

Example 9: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 19, Source: NNThroughputBenchmark.java

Example 10: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 21, Source: NNThroughputBenchmark.java

Example 11: blockReportNew

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
public DatanodeCommand blockReportNew(DatanodeRegistration nodeReg, BlockReport rep) throws IOException {
  if (runInfo.shutdown || !runInfo.isRunning) {
    return null;
  }
  if (ignoreDatanodes()) {
    LOG.info("Standby fell behind. Telling " + nodeReg.toString() +
              " to back off");
    // Do not process block reports yet as the ingest thread is catching up
    return AvatarDatanodeCommand.BACKOFF;
  }
  
  if (currentAvatar == Avatar.STANDBY) {
    Collection<Block> failed = super.blockReportWithRetries(nodeReg, rep);

    // standby should send only DNA_RETRY
    BlockCommand bCmd = new BlockCommand(DatanodeProtocols.DNA_RETRY,
        failed.toArray(new Block[failed.size()]));
    return bCmd;
  } else {
    // only the primary can send DNA_FINALIZE
    return super.blockReport(nodeReg, rep);
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 24, Source: AvatarNode.java

Example 12: run

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  if (!blockFile.delete() || (!metaFile.delete() && metaFile.exists())) {
    LOG.warn("Unexpected error trying to delete block "
        + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
      datanode.notifyNamenodeDeletedBlock(block);
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 17, Source: FsDatasetAsyncDiskService.java

Example 13: convert

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  }
  return null;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: PBHelper.java

Example 14: replicateBlocks

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(),
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0, 0, 0).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 22, Source: NNThroughputBenchmark.java

Example 15: run

import org.apache.hadoop.hdfs.server.protocol.BlockCommand; // import the required package/class
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
    if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
}
 
Developer: bikash, Project: PDHC, Lines: 22, Source: FsDatasetAsyncDiskService.java


Note: The org.apache.hadoop.hdfs.server.protocol.BlockCommand class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright in the source code remains with the original authors, and distribution or use should follow the license of the corresponding project. Do not reproduce without permission.