This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.util.LightWeightHashSet.remove. If you are wondering what LightWeightHashSet.remove does, how to call it, or what real-world uses look like, the curated method examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.util.LightWeightHashSet.
Four code examples of the LightWeightHashSet.remove method are shown below, sorted by popularity by default.
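Before the examples, a quick note on the method's contract: like java.util.Set.remove, LightWeightHashSet.remove returns true only if the element was actually present, and the class implements java.util.Collection. The following standalone sketch is illustrative, not taken from the Hadoop sources; the demo class name is hypothetical, while Block and LightWeightHashSet are real Hadoop types.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class LightWeightHashSetRemoveDemo {
  public static void main(String[] args) {
    LightWeightHashSet<Block> set = new LightWeightHashSet<>();
    Block b = new Block(1L);

    set.add(b);
    System.out.println(set.remove(b));  // true: the element was present
    System.out.println(set.remove(b));  // false: already removed
    System.out.println(set.isEmpty());  // true: the set is now empty
  }
}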
Example 1: removeFromExcessReplicateMap
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on

/**
 * If a block is removed from blocksMap, remove it from excessReplicateMap.
 */
private void removeFromExcessReplicateMap(BlockInfo block) {
  for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
    String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
    LightWeightHashSet<BlockInfo> excessReplicas =
        excessReplicateMap.get(uuid);
    if (excessReplicas != null) {
      if (excessReplicas.remove(block)) {
        excessBlocksCount.decrementAndGet();
        if (excessReplicas.isEmpty()) {
          excessReplicateMap.remove(uuid);
        }
      }
    }
  }
}
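Note the pattern here: the boolean returned by remove drives both pieces of bookkeeping. The excessBlocksCount counter is decremented only when an element was actually removed, and a set that becomes empty is pruned from excessReplicateMap so the outer map never accumulates dead entries. The next two examples apply the same pattern to a different map.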
Example 2: remove
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on

/** Remove the block from the specified storage. */
synchronized void remove(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> v = node2blocks.get(dn);
  if (v != null && v.remove(block)) {
    numBlocks--;
    if (v.isEmpty()) {
      node2blocks.remove(dn);
    }
  }
}
Example 3: remove
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on

/** Remove the block from the specified storage. */
synchronized void remove(final String storageID, final Block block) {
  final LightWeightHashSet<Block> v = node2blocks.get(storageID);
  if (v != null && v.remove(block)) {
    numBlocks--;
    if (v.isEmpty()) {
      node2blocks.remove(storageID);
    }
  }
}
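Examples 2 and 3 are structurally identical and appear to be the same method from different Hadoop versions, keying node2blocks by DatanodeInfo in one case and by the storage ID string in the other. The self-contained sketch below (class and field names are hypothetical; Block and LightWeightHashSet are real Hadoop types) reproduces the pattern. The short-circuit condition v != null && v.remove(block) guards against both a missing key and a missing element in a single test.

import java.util.TreeMap;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

class InvalidateSetDemo {
  private final TreeMap<String, LightWeightHashSet<Block>> node2blocks = new TreeMap<>();
  private long numBlocks = 0;

  synchronized void add(String storageID, Block block) {
    LightWeightHashSet<Block> set =
        node2blocks.computeIfAbsent(storageID, k -> new LightWeightHashSet<Block>());
    if (set.add(block)) {
      numBlocks++;  // count only genuinely new elements
    }
  }

  // Mirrors Example 3: decrement the counter only on a successful
  // removal, and prune the per-node set once it becomes empty.
  synchronized void remove(String storageID, Block block) {
    LightWeightHashSet<Block> v = node2blocks.get(storageID);
    if (v != null && v.remove(block)) {
      numBlocks--;
      if (v.isEmpty()) {
        node2blocks.remove(storageID);
      }
    }
  }
}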
Example 4: removeStoredBlock
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on

/**
 * Modify (block-->datanode) map. Possibly generate replication tasks, if the
 * removed block is still valid.
 */
public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
  blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
  assert (namesystem.hasWriteLock());
  {
    if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) {
      blockLog.debug("BLOCK* removeStoredBlock: {} has already been" +
          " removed from node {}", storedBlock, node);
      return;
    }

    CachedBlock cblock = namesystem.getCacheManager().getCachedBlocks()
        .get(new CachedBlock(storedBlock.getBlockId(), (short) 0, false));
    if (cblock != null) {
      boolean removed = false;
      removed |= node.getPendingCached().remove(cblock);
      removed |= node.getCached().remove(cblock);
      removed |= node.getPendingUncached().remove(cblock);
      if (removed) {
        blockLog.debug("BLOCK* removeStoredBlock: {} removed from caching "
            + "related lists on node {}", storedBlock, node);
      }
    }

    //
    // It's possible that the block was removed because of a datanode
    // failure. If the block is still valid, check if replication is
    // necessary. In that case, put block on a possibly-will-
    // be-replicated list.
    //
    BlockCollection bc = getBlockCollection(storedBlock);
    if (bc != null) {
      bmSafeMode.decrementSafeBlockCount(storedBlock);
      updateNeededReplications(storedBlock, -1, 0);
    }

    //
    // We've removed a block from a node, so it's definitely no longer
    // in "excess" there.
    //
    LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
        node.getDatanodeUuid());
    if (excessBlocks != null) {
      if (excessBlocks.remove(storedBlock)) {
        excessBlocksCount.decrementAndGet();
        blockLog.debug("BLOCK* removeStoredBlock: {} is removed from " +
            "excessBlocks", storedBlock);
        if (excessBlocks.size() == 0) {
          excessReplicateMap.remove(node.getDatanodeUuid());
        }
      }
    }

    // Remove the replica from corruptReplicas
    corruptReplicas.removeFromCorruptReplicasMap(storedBlock, node);
  }
}
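Beyond the excessReplicateMap cleanup, which mirrors Example 1, one idiom in this example is worth noting: accumulating remove results with removed |= ... . Unlike ||, the |= operator does not short-circuit, so every removal is attempted even after one succeeds, while the flag still records whether any of them removed something. A minimal illustration (plain java.util sets, not Hadoop code):

import java.util.HashSet;
import java.util.Set;

class RemoveAllListsDemo {
  public static void main(String[] args) {
    Set<String> pendingCached = new HashSet<>();
    Set<String> cached = new HashSet<>();
    Set<String> pendingUncached = new HashSet<>();
    cached.add("block-1");

    boolean removed = false;
    // |= evaluates every operand, so each set is always cleaned up;
    // `removed` ends up true if the element was in at least one set.
    removed |= pendingCached.remove("block-1");
    removed |= cached.remove("block-1");
    removed |= pendingUncached.remove("block-1");
    System.out.println(removed);  // true: it was in `cached`
  }
}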