This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.util.LightWeightHashSet. If you are wondering what the LightWeightHashSet class is for and how to use it in practice, the curated code examples below may help.
The LightWeightHashSet class belongs to the org.apache.hadoop.hdfs.util package. Fifteen code examples of the class are shown below, sorted by popularity by default.
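Before diving into the examples, here is a minimal standalone sketch of the set itself, using only the operations that the examples below actually exercise (the no-argument constructor, add, contains, remove, isEmpty and size). The demo class and the sample Block values are invented for illustration and are not taken from any of the examples.
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class LightWeightHashSetDemo { // hypothetical demo class, for illustration only
  public static void main(String[] args) {
    // LightWeightHashSet is a memory-conscious set used inside HDFS; the examples
    // below rely only on its Collection-style operations, shown here.
    LightWeightHashSet<Block> blocks = new LightWeightHashSet<Block>();

    Block b = new Block(1L, 0L, 1000L);       // sample block id / length / generation stamp
    blocks.add(b);                            // add() returns true if the element was newly inserted
    System.out.println(blocks.contains(b));   // true
    System.out.println(blocks.size());        // 1

    blocks.remove(b);                         // remove() returns true if the element was present
    System.out.println(blocks.isEmpty());     // true
  }
}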
Example 1: add
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
Example 2: dump
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
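For context, the add and dump methods in Examples 1 and 2 read and update two fields of their enclosing class that the snippets do not declare. The declarations below are a plausible reconstruction inferred from the entry type in Example 2's loop; the concrete map implementation and the counter type are assumptions, not taken from the source.
// Hypothetical field declarations for Examples 1 and 2 (inferred, not from the source).
// Maps each datanode to the set of blocks scheduled for invalidation on it.
private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks =
    new HashMap<DatanodeInfo, LightWeightHashSet<Block>>();
// Running total of blocks across all per-datanode sets.
private long numBlocks = 0L;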
Example 3: removeFromExcessReplicateMap
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * If a block is removed from blocksMap, remove it from excessReplicateMap.
 */
private void removeFromExcessReplicateMap(BlockInfo block) {
  for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
    String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
    LightWeightHashSet<BlockInfo> excessReplicas =
        excessReplicateMap.get(uuid);
    if (excessReplicas != null) {
      if (excessReplicas.remove(block)) {
        excessBlocksCount.decrementAndGet();
        if (excessReplicas.isEmpty()) {
          excessReplicateMap.remove(uuid);
        }
      }
    }
  }
}
Example 4: add
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.debug("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
Example 5: addBlock
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Get all valid locations of the block and add the block to results.
 * Returns the length of the added block; 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
      new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it =
      blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter out invalidated replicas
    LightWeightHashSet<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block,
        machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Example 6: addToInvalidatesNoLog
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Adds a block to the list of blocks which will be invalidated on the
 * specified datanode.
 *
 * @param b block
 * @param n datanode
 */
void addToInvalidatesNoLog(Block b, DatanodeInfo n, boolean ackRequired) {
  // We are the standby avatar and we don't want to add blocks to the
  // invalidates list.
  if (this.getNameNode().shouldRetryAbsentBlocks()) {
    return;
  }
  LightWeightHashSet<Block> invalidateSet = recentInvalidateSets.get(n
      .getStorageID());
  if (invalidateSet == null) {
    invalidateSet = new LightWeightHashSet<Block>();
    recentInvalidateSets.put(n.getStorageID(), invalidateSet);
  }
  if (!ackRequired) {
    b.setNumBytes(BlockFlags.NO_ACK);
  }
  if (invalidateSet.add(b)) {
    pendingDeletionBlocksCount++;
  }
}
Example 7: dumpExcessReplicasSets
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Dumps the contents of excessReplicateMap.
 */
void dumpExcessReplicasSets(PrintWriter out) {
  int size = excessReplicateMap.values().size();
  out.println("Metasave: Excess blocks " + excessBlocksCount
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<String, LightWeightHashSet<Block>> entry : excessReplicateMap
      .entrySet()) {
    LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeMap.get(entry.getKey()).getName());
      blocks.printDetails(out);
    }
  }
}
Example 8: dumpRecentInvalidateSets
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Dumps the contents of recentInvalidateSets.
 */
private void dumpRecentInvalidateSets(PrintWriter out) {
  int size = recentInvalidateSets.values().size();
  out.println("Metasave: Blocks " + pendingDeletionBlocksCount
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<String, LightWeightHashSet<Block>> entry : recentInvalidateSets
      .entrySet()) {
    LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeMap.get(entry.getKey()).getName() + blocks);
    }
  }
}
Example 9: clearReplicationQueues
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Clear replication queues. This is used by the standby avatar to reclaim memory.
 */
void clearReplicationQueues() {
  writeLock();
  try {
    synchronized (neededReplications) {
      neededReplications.clear();
    }
    underReplicatedBlocksCount = 0;
    corruptReplicas.clear();
    corruptReplicaBlocksCount = 0;
    overReplicatedBlocks.clear();
    raidEncodingTasks.clear();
    excessReplicateMap = new HashMap<String, LightWeightHashSet<Block>>();
    excessBlocksCount = 0;
  } finally {
    writeUnlock();
  }
}
Example 10: scanNamespace
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
void scanNamespace() {
  startNewPeriod();
  // Create a new processedBlocks structure
  processedBlocks = new LightWeightHashSet<Long>();
  if (verificationLog != null) {
    try {
      verificationLog.openCurFile();
    } catch (FileNotFoundException ex) {
      LOG.warn("Could not open current file");
    }
  }
  if (!assignInitialVerificationTimes()) {
    return;
  }
  // Start scanning
  scan();
}
Example 11: getBlocksBeingWrittenInfo
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
void getBlocksBeingWrittenInfo(LightWeightHashSet<Block> blockSet) throws IOException {
  if (rbwDir == null) {
    return;
  }
  File[] blockFiles = rbwDir.listFiles();
  if (blockFiles == null) {
    return;
  }
  String[] blockFileNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    if (!blockFiles[i].isDirectory()) {
      // get each block in the rbwDir directory
      Block block = FSDataset.getBlockFromNames(blockFiles, blockFileNames, i);
      if (block != null) {
        // add this block to the block set
        blockSet.add(block);
        if (DataNode.LOG.isDebugEnabled()) {
          DataNode.LOG.debug("recoverBlocksBeingWritten for block " + block);
        }
      }
    }
  }
}
Example 12: getBlockInfo
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Populate the given blockSet with any child blocks
 * found at this node.
 * @throws IOException
 */
public void getBlockInfo(LightWeightHashSet<Block> blockSet) throws IOException {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockInfo(blockSet);
    }
  }
  File[] blockFiles = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    Block block = getBlockFromNames(blockFiles, blockFilesNames, i);
    if (block != null) {
      blockSet.add(block);
    }
  }
}
Example 13: getBlockAndFileInfo
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Populate the given blockSet with any child blocks
 * found at this node. With each block, return the full path
 * of the block file.
 * @throws IOException
 */
void getBlockAndFileInfo(LightWeightHashSet<BlockAndFile> blockSet) throws IOException {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockAndFileInfo(blockSet);
    }
  }
  File[] blockFiles = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    Block block = getBlockFromNames(blockFiles, blockFilesNames, i);
    if (block != null) {
      blockSet.add(new BlockAndFile(blockFiles[i].getAbsoluteFile(), block));
    }
  }
}
Example 14: add
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode.getStorageID());
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode.getStorageID(), set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
          + ": add " + block + " to " + datanode);
    }
  }
}
Example 15: dump
import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<String, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeManager.getDatanode(entry.getKey()));
      out.println(blocks);
    }
  }
}