

Java BlockReport Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.BlockReport. If you are wondering what BlockReport is for, or how it is used in practice, the curated examples below should help.


The BlockReport class belongs to the org.apache.hadoop.hdfs.server.protocol package. Fifteen code examples of the class are shown below, sorted by popularity by default.
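Before the individual examples, here is a minimal sketch of the construction pattern most of them share: a BlockReport is assembled through its builder with a fixed number of hash buckets and then inspected through its bucket and hash accessors. The bucket count (1024) and the class name BlockReportSketch are illustrative assumptions, not values from any project below; only builder(int), addAllAsFinalized(...), build(), getBuckets(), getHashes(), and getNumBlocks() actually appear in the examples themselves.

import java.util.List;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlockReport;

public class BlockReportSketch {
  // Illustrative bucket count; the examples below take NUM_BUCKETS
  // from their own configuration.
  private static final int NUM_BUCKETS = 1024;

  // Build a report marking every given block as finalized
  // (the pattern used in examples 3 and 5).
  static BlockReport buildReport(List<Block> finalizedBlocks) {
    return BlockReport.builder(NUM_BUCKETS)
        .addAllAsFinalized(finalizedBlocks)
        .build();
  }

  // Inspect the buckets and per-bucket hashes carried by a report
  // (the accessors used in examples 2 and 14).
  static void inspect(BlockReport report) {
    System.out.println("blocks=" + report.getNumBlocks()
        + ", buckets=" + report.getBuckets().length
        + ", hashes=" + report.getHashes().length);
  }
}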

Example 1: blockReportNew

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
public DatanodeCommand blockReportNew(DatanodeRegistration nodeReg, BlockReport rep) throws IOException {
  if (runInfo.shutdown || !runInfo.isRunning) {
    return null;
  }
  if (ignoreDatanodes()) {
    LOG.info("Standby fell behind. Telling " + nodeReg.toString() +
              " to back off");
    // Do not process block reports yet as the ingest thread is catching up
    return AvatarDatanodeCommand.BACKOFF;
  }
  
  if (currentAvatar == Avatar.STANDBY) {
    Collection<Block> failed = super.blockReportWithRetries(nodeReg, rep);

    // standby should send only DNA_RETRY
    BlockCommand bCmd = new BlockCommand(DatanodeProtocols.DNA_RETRY,
        failed.toArray(new Block[failed.size()]));
    return bCmd;
  } else {
    // only the primary can send DNA_FINALIZE
    return super.blockReport(nodeReg, rep);
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 24, Source: AvatarNode.java

Example 2: calculateMismatchedHashes

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
private HashMatchingResult calculateMismatchedHashes(DatanodeDescriptor dn,
    BlockReport report) throws IOException {
  List<HashBucket> allMachineHashes = HashBuckets.getInstance()
      .getBucketsForDatanode(dn);
  List<Integer> matchedBuckets = new ArrayList<>();
  List<Integer> mismatchedBuckets = new ArrayList<>();
  
  for (int i = 0; i < report.getBuckets().length; i++){
    boolean matched = false;
    for (HashBucket bucket : allMachineHashes){
      if (bucket.getBucketId() == i && bucket.getHash() == report
          .getHashes()[i]){
        matched = true;
        break;
      }
    }
    if (matched){
      matchedBuckets.add(i);
    } else {
      mismatchedBuckets.add(i);
    }
  }
  
  return new HashMatchingResult(matchedBuckets, mismatchedBuckets);
}
 
Developer: hopshadoop, Project: hops, Lines: 26, Source: BlockManager.java
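The nested loop above rescans every stored HashBucket for each bucket in the report, i.e. O(buckets × stored hashes). If that cost matters, the same result can be computed in one pass by indexing the stored hashes by bucket ID first. The sketch below is an assumed alternative, not hops code; it reuses only the accessors already visible above (getBucketId, getHash, getHashes) and additionally needs java.util.Map and java.util.HashMap.

// Assumed one-pass variant of calculateMismatchedHashes; requires
// java.util.HashMap and java.util.Map in addition to the imports above.
private HashMatchingResult calculateMismatchedHashes(DatanodeDescriptor dn,
    BlockReport report) throws IOException {
  // Index the stored hashes by bucket id once, instead of rescanning
  // the whole list for every bucket in the report.
  Map<Integer, Long> storedHashes = new HashMap<>();
  for (HashBucket bucket : HashBuckets.getInstance().getBucketsForDatanode(dn)) {
    storedHashes.put(bucket.getBucketId(), bucket.getHash());
  }

  List<Integer> matchedBuckets = new ArrayList<>();
  List<Integer> mismatchedBuckets = new ArrayList<>();
  long[] reportHashes = report.getHashes();
  for (int i = 0; i < reportHashes.length; i++) {
    Long stored = storedHashes.get(i);
    // A bucket with no stored hash counts as mismatched, as above.
    if (stored != null && stored == reportHashes[i]) {
      matchedBuckets.add(i);
    } else {
      mismatchedBuckets.add(i);
    }
  }
  return new HashMatchingResult(matchedBuckets, mismatchedBuckets);
}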

Example 3: register

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"), "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""), new ExportedBlockKeys(),
      VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {new StorageBlockReport(storage,
      BlockReport.builder(NUM_BUCKETS).build())};
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
 
Developer: hopshadoop, Project: hops, Lines: 21, Source: NNThroughputBenchmark.java

Example 4: testSafeModeIBRAfterIncremental

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor node = spy(nodes.get(0));
  node.setStorageID("dummy-storage");
  node.isAlive = true;

  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy    
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertTrue(node.isFirstBlockReport());
  // send block report while pretending to already have blocks
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, "pool", BlockReport.builder(numBuckets).build());
  verify(node).receivedBlockReport();
  assertFalse(node.isFirstBlockReport());
}
 
Developer: hopshadoop, Project: hops, Lines: 25, Source: TestBlockManager.java

Example 5: blockReport_06

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
/**
 * Test creates a file and closes it.
 * The second datanode is started in the cluster.
 * As soon as the replication process is completed, the test runs a
 * block report and checks that no under-replicated blocks are left.
 *
 * @throws IOException
 *     in case of an error
 */
@Test
public void blockReport_06() throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;

  ArrayList<Block> blocks = writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N1);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] report =
      {new StorageBlockReport(new DatanodeStorage(dnR.getStorageID()),
          BlockReport.builder(NUM_BUCKETS).addAllAsFinalized(blocks).build())};
  cluster.getNameNodeRpc().blockReport(dnR, poolId, report);
  printStats();
  Thread.sleep(10000); //HOP: wait for the replication monitor to catch up
  assertEquals("Wrong number of PendingReplication Blocks", 0,
      cluster.getNamesystem().getUnderReplicatedBlocks());
}
 
Developer: hopshadoop, Project: hops, Lines: 32, Source: TestBlockReport.java

Example 6: blockReportNew

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
public DatanodeCommand blockReportNew(DatanodeRegistration nodeReg, BlockReport rep) throws IOException {
  if (runInfo.shutdown || !runInfo.isRunning) {
    return null;
  }
  if (ignoreDatanodes()) {
    LOG.info("Standby fell behind. Telling " + nodeReg.toString() +
              " to back off");
    // Do not process block reports yet as the ingest thread is catching up
    return AvatarDatanodeCommand.BACKOFF;
  }

  if (currentAvatar == Avatar.STANDBY) {
    Collection<Block> failed = super.blockReportWithRetries(nodeReg, rep);

    BlockCommand bCmd = new BlockCommand(DatanodeProtocols.DNA_RETRY,
        failed.toArray(new Block[failed.size()]));
    return bCmd;
  } else {
    return super.blockReport(nodeReg, rep);
  }
}
 
Developer: iVCE, Project: RDFS, Lines: 22, Source: AvatarNode.java

Example 7: blocksBeingWrittenReport

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
/**
 * Adds new replica blocks to the Inode-to-target mapping, and also
 * adds the Inode file to the DataNodeDesc.
 */
public void blocksBeingWrittenReport(DatanodeRegistration nodeReg,
    BlockReport blocks) throws IOException {
  verifyRequest(nodeReg);
  long[] blocksAsLong = blocks.getBlockReportInLongs();
  BlockListAsLongs blist = new BlockListAsLongs(blocksAsLong);
  boolean processed = namesystem.processBlocksBeingWrittenReport(nodeReg, blist);

  String message = "*BLOCK* NameNode.blocksBeingWrittenReport: "
      +"from "+nodeReg.getName()+" "+blist.getNumberOfBlocks() +" blocks";
  if (!processed) {
    message += " was discarded.";
  }
  stateChangeLog.info(message);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 19, Source: NameNode.java

Example 8: blockReportWithRetries

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
protected Collection<Block> blockReportWithRetries(
    DatanodeRegistration nodeReg, BlockReport blocks) throws IOException {
  verifyRequest(nodeReg);
  myMetrics.numBlockReport.inc();
  BlockListAsLongs blist =
    new BlockListAsLongs(blocks.getBlockReportInLongs());
  stateChangeLog.debug("*BLOCK* NameNode.blockReport: " + "from "
      + nodeReg.getName() + " " + blist.getNumberOfBlocks() + " blocks");

  return namesystem.processReport(nodeReg, blist);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 12, Source: NameNode.java

Example 9: sendBlocksBeingWrittenReport

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
/**
 * Sends a 'Blocks Being Written' report to the given node.
 *
 * @param node the node to send the report to
 * @throws IOException
 */
public void sendBlocksBeingWrittenReport(DatanodeProtocol node,
    int namespaceId, DatanodeRegistration nsRegistration) throws IOException {
  Block[] blocks = data.getBlocksBeingWrittenReport(namespaceId);
  if (blocks != null && blocks.length != 0) {
    long[] blocksAsLong =
      BlockListAsLongs.convertToArrayLongs(blocks);
    BlockReport bbwReport = new BlockReport(blocksAsLong);
    node.blocksBeingWrittenReport(nsRegistration, bbwReport);
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 17, Source: DataNode.java

Example 10: blockReport

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
    String poolId, StorageBlockReport[] reports) throws IOException {
  verifyRequest(nodeReg);
  
  BlockReport blist = reports[0].getReport(); // Assume no federation '0'
  if (blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug(
        "*BLOCK* NameNode.blockReport: " + "from " + nodeReg + " " +
            blist.getNumBlocks() + " blocks");
  }

  namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
  return new FinalizeCommand(poolId);
}
 
Developer: hopshadoop, Project: hops, Lines: 16, Source: NameNodeRpcServer.java

Example 11: applyHash

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
public void applyHash(int storageId, HdfsServerConstants.ReplicaState state,
    Block block) throws TransactionContextException, StorageException {
  int bucketId = getBucketForBlock(block);
  HashBucket bucket = getBucket(storageId, bucketId);

  // Compute the replica's hash contribution once, then add it to the bucket.
  long blockHash = BlockReport.hash(block, state);
  long newHash = bucket.getHash() + blockHash;
  LOG.debug("Applying block: " + blockToString(block) + ", sid: " + storageId
      + ", state: " + state.name() + ", hash: " + blockHash);

  bucket.setHash(newHash);
}
 
Developer: hopshadoop, Project: hops, Lines: 14, Source: HashBuckets.java

Example 12: undoHash

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
public void undoHash(int storageId, HdfsServerConstants.ReplicaState state,
    Block block) throws TransactionContextException, StorageException {
  int bucketId = getBucketForBlock(block);
  HashBucket bucket = getBucket(storageId, bucketId);

  // Subtract exactly the contribution that applyHash added for this replica.
  long blockHash = BlockReport.hash(block, state);
  long newHash = bucket.getHash() - blockHash;
  LOG.debug("Undo block: " + blockToString(block) + ", sid: " + storageId
      + ", state: " + state.name() + ", hash: " + blockHash);

  bucket.setHash(newHash);
}
 
Developer: hopshadoop, Project: hops, Lines: 12, Source: HashBuckets.java
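Examples 11 and 12 work because the bucket hash is maintained as a plain sum of per-replica hash contributions: applyHash adds BlockReport.hash(block, state) and undoHash subtracts the same value, so updates commute and any single change can be reverted exactly. A toy, self-contained illustration of that algebra; the two constants merely stand in for BlockReport.hash values and are not real hashes:

// Toy model of the additive bucket-hash scheme from examples 11 and 12.
public class BucketHashInvariant {
  public static void main(String[] args) {
    long bucketHash = 0L;
    long h1 = 0x9E3779B97F4A7C15L; // stand-in for hash(block1, FINALIZED)
    long h2 = 0x2545F4914F6CDD1DL; // stand-in for hash(block2, RBW)

    bucketHash += h1; // applyHash for block1
    bucketHash += h2; // applyHash for block2
    bucketHash -= h1; // undoHash for block1

    // Only block2's contribution remains, regardless of update order.
    System.out.println("invariant holds: " + (bucketHash == h2));
  }
}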

Example 13: getBlockReport

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
/**
 * Generates a block report from the in-memory block map.
 */
@Override // FsDatasetSpi
public BlockReport getBlockReport(String bpid) {
  int size = volumeMap.size(bpid);
  BlockReport.Builder builder = BlockReport.builder(NUM_BUCKETS);
  if (size == 0) {
    return builder.build();
  }
  
  synchronized (this) {
    for (ReplicaInfo b : volumeMap.replicas(bpid)) {
      switch (b.getState()) {
        case FINALIZED:
        case RBW:
        case RWR:
          builder.add(b);
          break;
        case RUR:
          ReplicaUnderRecovery rur = (ReplicaUnderRecovery) b;
          builder.add(rur.getOriginalReplica());
          break;
        case TEMPORARY:
          break;
        default:
          assert false : "Illegal ReplicaInfo state.";
      }
    }
    return builder.build();
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 33, Source: FsDatasetImpl.java
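A report produced by getBlockReport is typically wrapped in a StorageBlockReport and shipped to the namenode over DatanodeProtocol, as examples 3 and 5 show. A minimal sketch of that hand-off, where dataset, storage, reg, nn, and bpid are placeholders for objects the datanode already holds:

// Sketch: sending the generated report, following examples 3 and 5.
// dataset, storage, reg, nn, and bpid are assumed to exist in scope.
BlockReport report = dataset.getBlockReport(bpid);
StorageBlockReport[] reports = {new StorageBlockReport(storage, report)};
nn.blockReport(reg, bpid, reports);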

Example 14: convert

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
public static DatanodeProtocolProtos.BlockReportProto convert(BlockReport report) {
 
  List<DatanodeProtocolProtos.BlockReportBucketProto> bucketProtos = new
      ArrayList<>();
  for (BlockReportBucket bucket : report.getBuckets()){

    DatanodeProtocolProtos.BlockReportBucketProto.Builder bucketBuilder =
        DatanodeProtocolProtos.BlockReportBucketProto.newBuilder();
    for (BlockReportBlock block : bucket.getBlocks()){
      bucketBuilder.addBlocks(
          DatanodeProtocolProtos.BlockReportBlockProto.newBuilder()
              .setBlockId(block.getBlockId())
              .setGenerationStamp(block.getGenerationStamp())
              .setLength(block.getLength())
              .setState(convert(block.getState())));
    }
    bucketProtos.add(bucketBuilder.build());
  }

  List<Long> hashes = new ArrayList<>();
  for (long hash : report.getHashes()){
    hashes.add(hash);
  }
  
  return DatanodeProtocolProtos.BlockReportProto.newBuilder()
      .addAllBuckets(bucketProtos)
      .addAllHashes(hashes)
      .build();
}
 
Developer: hopshadoop, Project: hops, Lines: 30, Source: PBHelper.java

Example 15: getAllBlockReports

import org.apache.hadoop.hdfs.server.protocol.BlockReport; // import the required package/class
/**
 * @return block reports from all datanodes; the result array is indexed
 * in the same order as the list of datanodes returned by getDataNodes()
 */
public Iterable<BlockReportBlock>[] getAllBlockReports(String bpid) {
  int numDataNodes = dataNodes.size();
  Iterable<BlockReportBlock>[] result = new BlockReport[numDataNodes];
  for (int i = 0; i < numDataNodes; ++i) {
    result[i] = getBlockReport(bpid, i);
  }
  return result;
}
 
Developer: hopshadoop, Project: hops, Lines: 14, Source: MiniDFSCluster.java


Note: The org.apache.hadoop.hdfs.server.protocol.BlockReport examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before redistributing or using the code; do not reproduce without permission.