This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.util.LightWeightHashSet.contains. If you are wondering what LightWeightHashSet.contains does, how to call it, or where to find it used in context, the curated code examples below should help. You can also explore the enclosing class, org.apache.hadoop.hdfs.util.LightWeightHashSet, for related usage.
The following shows 3 code examples of LightWeightHashSet.contains, ordered by popularity.
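Before the examples, here is a minimal, self-contained sketch of the contains workflow. LightWeightHashSet implements java.util.Collection, so add/contains behave as you would expect; the Block IDs below are invented purely for illustration:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class ContainsDemo {
  public static void main(String[] args) {
    LightWeightHashSet<Block> blocks = new LightWeightHashSet<Block>();
    Block b1 = new Block(1L); // hypothetical block IDs, illustration only
    Block b2 = new Block(2L);

    blocks.add(b1);
    System.out.println(blocks.contains(b1)); // true
    System.out.println(blocks.contains(b2)); // false: never added
  }
}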
Example 1: addBlock
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/**
 * Get all valid locations of the block and add the block to results.
 * @return the length of the added block; 0 if the block is not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
      new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it =
      blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // skip replicas that are queued for invalidation on this storage
    LightWeightHashSet<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block,
        machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
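Example 1 uses contains as a filter: a storage counts as a valid location only if the block is not queued for invalidation there. The standalone sketch below restates that test outside BlockManager; the map and method names (invalidateSets, isValidLocation) are invented for illustration, not HDFS API:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class InvalidateFilterSketch {
  // Hypothetical per-storage pending-invalidation sets, standing in for
  // recentInvalidateSets in Example 1.
  private final Map<String, LightWeightHashSet<Block>> invalidateSets =
      new HashMap<String, LightWeightHashSet<Block>>();

  /** A storage is a valid location unless the block is pending deletion there. */
  boolean isValidLocation(String storageID, Block block) {
    LightWeightHashSet<Block> pending = invalidateSets.get(storageID);
    // Same null-or-absent test as addBlock(): a missing set means nothing
    // is queued for invalidation on that storage.
    return pending == null || !pending.contains(block);
  }
}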
Example 2: countNodes
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/**
* Return the number of nodes hosting a given block, grouped
* by the state of those replicas.
* For a striped block, this includes nodes storing blocks belonging to the
* striped block group.
*/
public NumberReplicas countNodes(Block b) {
  int decommissioned = 0;
  int decommissioning = 0;
  int live = 0;
  int readonly = 0;
  int corrupt = 0;
  int excess = 0;
  int stale = 0;
  Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
  for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
    if (storage.getState() == State.FAILED) {
      continue;
    } else if (storage.getState() == State.READ_ONLY_SHARED) {
      readonly++;
      continue;
    }
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) {
      corrupt++;
    } else if (node.isDecommissionInProgress()) {
      decommissioning++;
    } else if (node.isDecommissioned()) {
      decommissioned++;
    } else {
      LightWeightHashSet<BlockInfo> blocksExcess = excessReplicateMap.get(
          node.getDatanodeUuid());
      if (blocksExcess != null && blocksExcess.contains(b)) {
        excess++;
      } else {
        live++;
      }
    }
    if (storage.areBlockContentsStale()) {
      stale++;
    }
  }
  return new NumberReplicas(live, readonly, decommissioned, decommissioning,
      corrupt, excess, stale);
}
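Example 2 layers several membership tests, and the order matters: a corrupt replica must not also be counted as live or excess. The sketch below isolates that precedence using plain java.util collections; the enum and parameter names are invented for illustration, not HDFS API:

import java.util.Set;

public class ReplicaStateSketch {
  enum ReplicaState { CORRUPT, DECOMMISSIONING, DECOMMISSIONED, EXCESS, LIVE }

  // Inputs stand in for corruptReplicas.getNodes(b) and
  // excessReplicateMap.get(uuid) from Example 2.
  static ReplicaState classify(String nodeUuid,
      boolean decommissioning, boolean decommissioned,
      Set<String> corruptNodes, Set<String> excessNodes) {
    // Precedence mirrors countNodes(): corrupt wins, then decommission
    // state, then excess; LIVE is the fallback.
    if (corruptNodes != null && corruptNodes.contains(nodeUuid)) {
      return ReplicaState.CORRUPT;
    } else if (decommissioning) {
      return ReplicaState.DECOMMISSIONING;
    } else if (decommissioned) {
      return ReplicaState.DECOMMISSIONED;
    } else if (excessNodes != null && excessNodes.contains(nodeUuid)) {
      return ReplicaState.EXCESS;
    }
    return ReplicaState.LIVE;
  }
}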
Example 3: getReplicaInfo
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the package/class the method depends on
/**
* Display info of each replica for replication block.
* For striped block group, display info of each internal block.
*/
private String getReplicaInfo(BlockInfo storedBlock) {
  if (!(showLocations || showRacks || showReplicaDetails)) {
    return "";
  }
  final boolean isComplete = storedBlock.isComplete();
  DatanodeStorageInfo[] storages = isComplete ?
      blockManager.getStorages(storedBlock) :
      storedBlock.getUnderConstructionFeature().getExpectedStorageLocations();
  StringBuilder sb = new StringBuilder(" [");
  for (int i = 0; i < storages.length; i++) {
    DatanodeStorageInfo storage = storages[i];
    DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor();
    if (showRacks) {
      sb.append(NodeBase.getPath(dnDesc));
    } else {
      sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(),
          storage.getStorageType()));
    }
    if (showReplicaDetails) {
      LightWeightHashSet<BlockInfo> blocksExcess =
          blockManager.excessReplicateMap.get(dnDesc.getDatanodeUuid());
      Collection<DatanodeDescriptor> corruptReplicas =
          blockManager.getCorruptReplicas(storedBlock);
      sb.append("(");
      if (dnDesc.isDecommissioned()) {
        sb.append("DECOMMISSIONED)");
      } else if (dnDesc.isDecommissionInProgress()) {
        sb.append("DECOMMISSIONING)");
      } else if (corruptReplicas != null
          && corruptReplicas.contains(dnDesc)) {
        sb.append("CORRUPT)");
      } else if (blocksExcess != null
          && blocksExcess.contains(storedBlock)) {
        sb.append("EXCESS)");
      } else if (dnDesc.isStale(this.staleInterval)) {
        sb.append("STALE_NODE)");
      } else if (storage.areBlockContentsStale()) {
        sb.append("STALE_BLOCK_CONTENT)");
      } else {
        sb.append("LIVE)");
      }
    }
    if (i < storages.length - 1) {
      sb.append(", ");
    }
  }
  sb.append(']');
  return sb.toString();
}
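Example 3 pairs the same contains checks with a StringBuilder that separates entries with ", " and closes with ']'. A small runnable sketch of just that formatting pattern, with invented node labels:

import java.util.Arrays;
import java.util.List;

public class ReplicaListFormatSketch {
  /** Joins labeled entries the way getReplicaInfo() does: " [a, b]". */
  static String format(List<String> labeledReplicas) {
    StringBuilder sb = new StringBuilder(" [");
    for (int i = 0; i < labeledReplicas.size(); i++) {
      sb.append(labeledReplicas.get(i));
      if (i < labeledReplicas.size() - 1) {
        sb.append(", "); // same trailing-separator guard as Example 3
      }
    }
    sb.append(']');
    return sb.toString();
  }

  public static void main(String[] args) {
    // Node labels are hypothetical, illustration only.
    System.out.println(format(Arrays.asList("dn1(LIVE)", "dn2(EXCESS)")));
    // -> " [dn1(LIVE), dn2(EXCESS)]"
  }
}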