Java LightWeightHashSet.size Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hdfs.util.LightWeightHashSet.size. If you are wondering what LightWeightHashSet.size does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.util.LightWeightHashSet.


The following shows 10 code examples of the LightWeightHashSet.size method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
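Before the examples, here is a minimal usage sketch of the method itself (an assumed illustration written for this article, not taken from any of the projects below; the helper method countSampleBlocks is hypothetical): LightWeightHashSet implements java.util.Collection, and size() returns the number of elements currently stored in the set.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

/** Hypothetical helper: counts the blocks held in a freshly populated set. */
static int countSampleBlocks() {
  LightWeightHashSet<Block> blocks = new LightWeightHashSet<Block>();
  blocks.add(new Block(1L, 0L, 0L)); // blockId, numBytes, generationStamp
  blocks.add(new Block(2L, 0L, 0L));
  return blocks.size();              // 2: the number of elements currently in the set
}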

Example 1: dump

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: InvalidateBlocks.java

Example 2: dumpExcessReplicasSets

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/**
 * Dumps the contents of excessReplicateMap.
 */
void dumpExcessReplicasSets(PrintWriter out) {
  int size = excessReplicateMap.values().size();
  out.println("Metasave: Excess blocks " + excessBlocksCount
    + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<String, LightWeightHashSet<Block>> entry : excessReplicateMap
      .entrySet()) {
    LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeMap.get(entry.getKey()).getName());
      blocks.printDetails(out);
    }
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 20, Source file: FSNamesystem.java

Example 3: dumpRecentInvalidateSets

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/**
 * dumps the contents of recentInvalidateSets
 */
private void dumpRecentInvalidateSets(PrintWriter out) {
  int size = recentInvalidateSets.values().size();
  out.println("Metasave: Blocks " + pendingDeletionBlocksCount
    + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<String, LightWeightHashSet<Block>> entry : recentInvalidateSets
      .entrySet()) {
    LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeMap.get(entry.getKey()).getName() + blocks);
    }
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 19, Source file: FSNamesystem.java

Example 4: dump

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeManager.getDatanode(entry.getKey()));
      out.println(blocks);
    }
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 18, Source file: InvalidateBlocks.java

Example 5: remove

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/** Remove a storage from the invalidatesSet */
synchronized void remove(final DatanodeInfo dn) {
  final LightWeightHashSet<Block> blocks = node2blocks.remove(dn);
  if (blocks != null) {
    numBlocks -= blocks.size();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source file: InvalidateBlocks.java

Example 6: removeFromInvalidates

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/**
 * Remove a datanode from the invalidatesSet
 *
 * @param storageID the storage ID of the datanode to remove
 */
void removeFromInvalidates(String storageID) {
  LightWeightHashSet<Block> blocks = recentInvalidateSets.remove(storageID);
  if (blocks != null) {
    pendingDeletionBlocksCount -= blocks.size();
  }
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 12, Source file: FSNamesystem.java

Example 7: getBlocksBeingWrittenReport

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/**
 * Return a table of blocks that are currently being written.
 * @throws IOException
 */
public Block[] getBlocksBeingWrittenReport(int namespaceId) throws IOException {
  LightWeightHashSet<Block> blockSet = new LightWeightHashSet<Block>();
  volumes.getBlocksBeingWrittenInfo(namespaceId, blockSet);
  Block blockTable[] = new Block[blockSet.size()];
  int i = 0;
  for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
    blockTable[i] = it.next();
  }
  return blockTable;
}
 
Developer ID: rhli, Project: hadoop-EAR, Lines of code: 15, Source file: FSDataset.java

Example 8: remove

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/** Remove a storage from the invalidatesSet */
synchronized void remove(final String storageID) {
  final LightWeightHashSet<Block> blocks = node2blocks.remove(storageID);
  if (blocks != null) {
    numBlocks -= blocks.size();
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 8, Source file: InvalidateBlocks.java

Example 9: getBlockReport

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/**
 * Return a table of block data for given namespace
 */
public Block[] getBlockReport(int namespaceId) {
  // getBlockReport doesn't acquire the global lock, as we believe it is
  // OK to return somewhat inconsistent partial results. The inconsistent
  // information will eventually be fixed by the next incremental block report.
  LightWeightHashSet<Block> blockSet = new LightWeightHashSet<Block>();
  volumes.getBlockInfo(namespaceId, blockSet);
  Block blockTable[] = new Block[blockSet.size()];
  int i = 0;
  for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
    blockTable[i] = it.next();
  }
  return blockTable;
}
 
Developer ID: iVCE, Project: RDFS, Lines of code: 17, Source file: FSDataset.java

Example 10: removeStoredBlock

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/**
 * Modify (block-->datanode) map. Possibly generate replication tasks, if the
 * removed block is still valid.
 */
public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
  blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
  assert (namesystem.hasWriteLock());
  {
    if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) {
      blockLog.debug("BLOCK* removeStoredBlock: {} has already been" +
          " removed from node {}", storedBlock, node);
      return;
    }

    CachedBlock cblock = namesystem.getCacheManager().getCachedBlocks()
        .get(new CachedBlock(storedBlock.getBlockId(), (short) 0, false));
    if (cblock != null) {
      boolean removed = false;
      removed |= node.getPendingCached().remove(cblock);
      removed |= node.getCached().remove(cblock);
      removed |= node.getPendingUncached().remove(cblock);
      if (removed) {
        blockLog.debug("BLOCK* removeStoredBlock: {} removed from caching "
            + "related lists on node {}", storedBlock, node);
      }
    }

    //
    // It's possible that the block was removed because of a datanode
    // failure. If the block is still valid, check if replication is
    // necessary. In that case, put block on a possibly-will-
    // be-replicated list.
    //
    BlockCollection bc = getBlockCollection(storedBlock);
    if (bc != null) {
      bmSafeMode.decrementSafeBlockCount(storedBlock);
      updateNeededReplications(storedBlock, -1, 0);
    }

    //
    // We've removed a block from a node, so it's definitely no longer
    // in "excess" there.
    //
    LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
        node.getDatanodeUuid());
    if (excessBlocks != null) {
      if (excessBlocks.remove(storedBlock)) {
        excessBlocksCount.decrementAndGet();
        blockLog.debug("BLOCK* removeStoredBlock: {} is removed from " +
            "excessBlocks", storedBlock);
        if (excessBlocks.size() == 0) {
          excessReplicateMap.remove(node.getDatanodeUuid());
        }
      }
    }

    // Remove the replica from corruptReplicas
    corruptReplicas.removeFromCorruptReplicasMap(storedBlock, node);
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 61, Source file: BlockManager.java


Note: The org.apache.hadoop.hdfs.util.LightWeightHashSet.size method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.