

Java BlockListAsLongs.getNumberOfBlocks Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.protocol.BlockListAsLongs.getNumberOfBlocks. If you are wondering what BlockListAsLongs.getNumberOfBlocks does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.protocol.BlockListAsLongs.


The following presents 6 code examples of the BlockListAsLongs.getNumberOfBlocks method, ordered by popularity by default.
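Before the examples, a quick orientation: a BlockListAsLongs encodes a DataNode block report compactly as an array of longs, and getNumberOfBlocks() returns how many blocks it contains, which makes it the natural loop bound when walking a report. The minimal sketch below uses the older indexed accessors (getBlockId, getBlockLen, getBlockGenStamp) that appear in Examples 5 and 6; newer Hadoop releases expose an iterator over the replicas instead, so treat this as version-dependent.

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

// Minimal sketch: walk every block in a report using
// getNumberOfBlocks() as the loop bound (older indexed API).
private void printBlockReport(BlockListAsLongs blockList) {
  for (int i = 0; i < blockList.getNumberOfBlocks(); i++) {
    System.out.println("block id=" + blockList.getBlockId(i)
        + " len=" + blockList.getBlockLen(i)
        + " genStamp=" + blockList.getBlockGenStamp(i));
  }
}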

Example 1: verifyCapturedArguments

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {

  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));

    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = report.getBlocks();
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }

  assert(numBlocksReported >= expectedTotalBlockCount);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source: TestDnRespectsBlockReportSplitThreshold.java
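For context, the captor that verifyCapturedArguments receives is populated elsewhere in the test by verifying the blockReport RPC on a spied NameNode proxy. Below is a hedged sketch of that wiring; nnSpy and expectedCalls are hypothetical names, and the trailing BlockReportContext argument matches the Hadoop 2.7+ signature (older versions omit it).

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.mockito.ArgumentCaptor;

// Hypothetical wiring inside the test: capture every StorageBlockReport[]
// the DataNode sent to the spied NameNode, then run the helper above.
ArgumentCaptor<StorageBlockReport[]> captor =
    ArgumentCaptor.forClass(StorageBlockReport[].class);
verify(nnSpy, times(expectedCalls)).blockReport(
    any(DatanodeRegistration.class),
    anyString(),
    captor.capture(),
    any(BlockReportContext.class)); // omit this argument on pre-2.7 Hadoop
verifyCapturedArguments(captor, expectedReportsPerCall, expectedTotalBlockCount);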

Example 2: verifyCapturedArguments

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
private void verifyCapturedArguments(
    ArgumentCaptor<StorageBlockReport[]> captor,
    int expectedReportsPerCall,
    int expectedTotalBlockCount) {

  List<StorageBlockReport[]> listOfReports = captor.getAllValues();
  int numBlocksReported = 0;
  for (StorageBlockReport[] reports : listOfReports) {
    assertThat(reports.length, is(expectedReportsPerCall));

    for (StorageBlockReport report : reports) {
      BlockListAsLongs blockList = new BlockListAsLongs(report.getBlocks());
      numBlocksReported += blockList.getNumberOfBlocks();
    }
  }

  assert(numBlocksReported >= expectedTotalBlockCount);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 19, Source: TestDnRespectsBlockReportSplitThreshold.java

Example 3: getBlockReports

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
private static StorageBlockReport[] getBlockReports(
    DataNode dn, String bpid, boolean corruptOneBlockGs,
    boolean corruptOneBlockLen) {
  Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
      dn.getFSDataset().getBlockReports(bpid);

  // Send block report
  StorageBlockReport[] reports =
      new StorageBlockReport[perVolumeBlockLists.size()];
  boolean corruptedGs = false;
  boolean corruptedLen = false;

  int reportIndex = 0;
  for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
    DatanodeStorage dnStorage = kvPair.getKey();
    BlockListAsLongs blockList = kvPair.getValue();

    // Walk the list of blocks until we find one each to corrupt the
    // generation stamp and length, if so requested.
    for (int i = 0; i < blockList.getNumberOfBlocks(); ++i) {
      if (corruptOneBlockGs && !corruptedGs) {
        blockList.corruptBlockGSForTesting(i, rand);
        LOG.info("Corrupted the GS for block ID " + i);
        corruptedGs = true;
      } else if (corruptOneBlockLen && !corruptedLen) {
        blockList.corruptBlockLengthForTesting(i, rand);
        LOG.info("Corrupted the length for block ID " + i);
        corruptedLen = true;
      } else {
        break;
      }
    }

    reports[reportIndex++] =
        new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
  }

  return reports;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 40, Source: BlockReportTestBase.java

Example 4: saveReplicas

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
private void saveReplicas(BlockListAsLongs blocksListToPersist) {
  if (blocksListToPersist == null ||
      blocksListToPersist.getNumberOfBlocks() == 0) {
    return;
  }
  File tmpFile = new File(currentDir, REPLICA_CACHE_FILE + ".tmp");
  if (tmpFile.exists() && !tmpFile.delete()) {
    LOG.warn("Failed to delete tmp replicas file in " +
      tmpFile.getPath());
    return;
  }
  File replicaCacheFile = new File(currentDir, REPLICA_CACHE_FILE);
  if (replicaCacheFile.exists() && !replicaCacheFile.delete()) {
    LOG.warn("Failed to delete replicas file in " +
        replicaCacheFile.getPath());
    return;
  }
  
  FileOutputStream out = null;
  try {
    out = new FileOutputStream(tmpFile);
    blocksListToPersist.writeTo(out);
    out.close();
    // Renaming the tmp file to replicas
    Files.move(tmpFile, replicaCacheFile);
  } catch (Exception e) {
    // If write failed, the volume might be bad. Since the cache file is
    // not critical, log the error, delete both the files (tmp and cache)
    // and continue.
    LOG.warn("Failed to write replicas to cache ", e);
    if (replicaCacheFile.exists() && !replicaCacheFile.delete()) {
      LOG.warn("Failed to delete replicas file: " + 
          replicaCacheFile.getPath());
    }
  } finally {
    IOUtils.closeStream(out);
    if (tmpFile.exists() && !tmpFile.delete()) {
      LOG.warn("Failed to delete tmp file in " +
          tmpFile.getPath());
    }
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 43, Source: BlockPoolSlice.java
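Example 4 covers only the write side; on DataNode restart, BlockPoolSlice reads the cache file back to rebuild the replica list. The sketch below shows that round trip under an assumption: BlockListAsLongs.readFrom(InputStream) exists in the same Hadoop line as this example, but later releases change the signature (adding a max-length parameter), so verify it against your version.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.io.IOUtils;

// Assumed read side of the cache written by saveReplicas() above.
// BlockListAsLongs.readFrom(InputStream) is an assumption for this
// Hadoop line; newer releases take an extra maxDataLength argument.
private int countCachedReplicas(File replicaCacheFile) throws IOException {
  FileInputStream in = null;
  try {
    in = new FileInputStream(replicaCacheFile);
    BlockListAsLongs cached = BlockListAsLongs.readFrom(in);
    return cached.getNumberOfBlocks();
  } finally {
    IOUtils.closeStream(in);
  }
}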

Example 5: processBlocksBeingWrittenReport

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
public boolean processBlocksBeingWrittenReport(DatanodeID nodeID, 
    BlockListAsLongs blocksBeingWritten) 
throws IOException {
  // check if we can discard the report
  if (safeMode != null && !safeMode.shouldProcessRBWReports()) {
    return false;
  }
  writeLock();
  try {
    DatanodeDescriptor dataNode = getDatanode(nodeID);
    if (dataNode == null) {
      throw new IOException("ProcessReport from unregisterted node: "
          + nodeID.getName());
    }
    
    Block block = new Block();

    for (int i = 0; i < blocksBeingWritten.getNumberOfBlocks(); i ++) {
      block.set(blocksBeingWritten.getBlockId(i), 
          blocksBeingWritten.getBlockLen(i), 
          blocksBeingWritten.getBlockGenStamp(i));

      BlockInfo storedBlock = blocksMap.getStoredBlockWithoutMatchingGS(block);

      if (storedBlock == null) {
        rejectAddStoredBlock(
            new Block(block), dataNode,
            "Block not in blockMap with any generation stamp",
            true, false);
        continue;
      }

      INodeFile inode = storedBlock.getINode();
      if (inode == null) {
        rejectAddStoredBlock(
            new Block(block), dataNode,
            "Block does not correspond to any file",
            true, false);
        continue;
      }

      boolean underConstruction = inode.isUnderConstruction();
      boolean isLastBlock = inode.getLastBlock() != null &&
                 inode.getLastBlock().getBlockId() == block.getBlockId();

      // Must be the last block of a file under construction.
      if (!underConstruction) {
        rejectAddStoredBlock(
            new Block(block), dataNode,
            "Reported as block being written but is a block of closed file.",
            true, false);
        continue;
      }

      if (!isLastBlock) {
        rejectAddStoredBlock(
            new Block(block), dataNode,
            "Reported as block being written but not the last block of " +
            "an under-construction file.",
            true, false);
        continue;
      }

      INodeFileUnderConstruction pendingFile = 
                          (INodeFileUnderConstruction)inode;
      boolean added = pendingFile.addTarget(dataNode, block.getGenerationStamp());
      if (added) {
        // Increment only once for each datanode.
        DatanodeDescriptor[] validDNs = pendingFile.getValidTargets();
        if (validDNs != null) {
          incrementSafeBlockCount(validDNs.length, true);            
        }
      }
    }
  } finally {
    writeUnlock();
    checkSafeMode();
  }
  return true;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 81, Source: FSNamesystem.java

Example 6: processBlocksBeingWrittenReport

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; // import the package/class this method depends on
/**
 * Updates the targets for INodeFileUnderConstruction.
 * 
 * @param nodeID
 *          - DataNode ID
 * @param blocksBeingWritten
 *          - list of blocks that are still in progress.
 * @throws IOException
 */
public synchronized void processBlocksBeingWrittenReport(DatanodeID nodeID,
    BlockListAsLongs blocksBeingWritten) throws IOException {
  DatanodeDescriptor dataNode = getDatanode(nodeID);
  if (dataNode == null) {
    throw new IOException("ProcessReport from unregistered node: "
        + nodeID.getName());
  }

  // Check if this datanode should actually be shutdown instead.
  if (shouldNodeShutdown(dataNode)) {
    setDatanodeDead(dataNode);
    throw new DisallowedDatanodeException(dataNode);
  }

  Block block = new Block();

  for (int i = 0; i < blocksBeingWritten.getNumberOfBlocks(); i++) {
    block.set(blocksBeingWritten.getBlockId(i), blocksBeingWritten
        .getBlockLen(i), blocksBeingWritten.getBlockGenStamp(i));

    BlockInfo storedBlock = blocksMap.getStoredBlockWithoutMatchingGS(block);

    if (storedBlock == null) {
      rejectAddStoredBlock(new Block(block), dataNode,
          "Block not in blockMap with any generation stamp");
      continue;
    }

    INodeFile inode = storedBlock.getINode();
    if (inode == null) {
      rejectAddStoredBlock(new Block(block), dataNode,
          "Block does not correspond to any file");
      continue;
    }

    boolean underConstruction = inode.isUnderConstruction();
    boolean isLastBlock = inode.getLastBlock() != null
        && inode.getLastBlock().getBlockId() == block.getBlockId();

    // Must be the last block of a file under construction.
    if (!underConstruction) {
      rejectAddStoredBlock(new Block(block), dataNode,
          "Reported as block being written but is a block of closed file.");
      continue;
    }

    if (!isLastBlock) {
      rejectAddStoredBlock(new Block(block), dataNode,
          "Reported as block being written but not the last block of "
              + "an under-construction file.");
      continue;
    }

    INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction) inode;
    pendingFile.addTarget(dataNode);
    incrementSafeBlockCount(pendingFile.getTargets().length);
  }
}
 
Developer ID: Seagate, Project: hadoop-on-lustre, Lines of code: 68, Source: FSNamesystem.java


Note: The org.apache.hadoop.hdfs.protocol.BlockListAsLongs.getNumberOfBlocks method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community open-source projects; copyright remains with the original authors, and any distribution or use should follow each project's license. Do not reproduce without permission.