Java LightWeightHashSet Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.util.LightWeightHashSet. If you are wondering what the LightWeightHashSet class is for, how to use it, or what real-world code that uses it looks like, the selected class code examples below may help.


The LightWeightHashSet class belongs to the org.apache.hadoop.hdfs.util package. A total of 15 code examples for the class are shown below, sorted by popularity by default.
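
Before the project examples, here is a minimal, self-contained usage sketch. It assumes only that LightWeightHashSet supports the standard java.util.Collection operations that the examples below rely on (add, contains, remove, size, isEmpty); the class name LightWeightHashSetDemo and the Block constructor values are illustrative and not taken from any of the projects cited here.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class LightWeightHashSetDemo {
  public static void main(String[] args) {
    // HDFS uses this set to track Block objects, e.g. per datanode.
    LightWeightHashSet<Block> blocks = new LightWeightHashSet<Block>();

    // Illustrative values: blockId, numBytes, generationStamp.
    Block b = new Block(1L, 1024L, 1000L);

    if (blocks.add(b)) {                      // add() returns true only for a new element
      System.out.println("added " + b);
    }
    System.out.println(blocks.contains(b));   // true
    System.out.println(blocks.size());        // 1

    blocks.remove(b);
    System.out.println(blocks.isEmpty());     // true
  }
}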

Example 1: add

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 20, Source: InvalidateBlocks.java

Example 2: dump

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(entry.getKey());
      out.println(blocks);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 18, Source: InvalidateBlocks.java

Example 3: removeFromExcessReplicateMap

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * If a block is removed from blocksMap, remove it from excessReplicateMap.
 */
private void removeFromExcessReplicateMap(BlockInfo block) {
  for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
    String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
    LightWeightHashSet<BlockInfo> excessReplicas =
        excessReplicateMap.get(uuid);
    if (excessReplicas != null) {
      if (excessReplicas.remove(block)) {
        excessBlocksCount.decrementAndGet();
        if (excessReplicas.isEmpty()) {
          excessReplicateMap.remove(uuid);
        }
      }
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 19, Source: BlockManager.java

Example 4: add

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode);
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode, set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.debug("BLOCK* {}: add {} to {}",
          getClass().getSimpleName(), block, datanode);
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 20, Source: InvalidateBlocks.java

Example 5: addBlock

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Get all valid locations of the block & add the block to results;
 * return the length of the added block, or 0 if the block is not added.
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
    new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it =
    blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter out replicas pending invalidation
    LightWeightHashSet<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block,
      machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 25, Source: FSNamesystem.java

Example 6: addToInvalidatesNoLog

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Adds block to list of blocks which will be invalidated on
 * specified datanode
 *
 * @param b block
 * @param n datanode
 */
void addToInvalidatesNoLog(Block b, DatanodeInfo n, boolean ackRequired) {
  // We are the standby avatar and we don't want to add blocks to the
  // invalidates list.
  if (this.getNameNode().shouldRetryAbsentBlocks()) {
    return;
  }

  LightWeightHashSet<Block> invalidateSet = recentInvalidateSets.get(n
      .getStorageID());
  if (invalidateSet == null) {
    invalidateSet = new LightWeightHashSet<Block>();
    recentInvalidateSets.put(n.getStorageID(), invalidateSet);
  }
  if(!ackRequired){
    b.setNumBytes(BlockFlags.NO_ACK);
  }
  if (invalidateSet.add(b)) {
    pendingDeletionBlocksCount++;
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 28, Source: FSNamesystem.java

Example 7: dumpExcessReplicasSets

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * dumps the contents of excessReplicateMap
 */
void dumpExcessReplicasSets(PrintWriter out) {
  int size = excessReplicateMap.values().size();
  out.println("Metasave: Excess blocks " + excessBlocksCount
    + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<String, LightWeightHashSet<Block>> entry : excessReplicateMap
      .entrySet()) {
    LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeMap.get(entry.getKey()).getName());
      blocks.printDetails(out);
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 20, Source: FSNamesystem.java

Example 8: dumpRecentInvalidateSets

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * dumps the contents of recentInvalidateSets
 */
private void dumpRecentInvalidateSets(PrintWriter out) {
  int size = recentInvalidateSets.values().size();
  out.println("Metasave: Blocks " + pendingDeletionBlocksCount
    + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }
  for (Map.Entry<String, LightWeightHashSet<Block>> entry : recentInvalidateSets
      .entrySet()) {
    LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeMap.get(entry.getKey()).getName() + blocks);
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 19, Source: FSNamesystem.java

Example 9: clearReplicationQueues

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Clear replication queues. This is used by standby avatar to reclaim memory.
 */
void clearReplicationQueues() {
  writeLock();
  try {
    synchronized (neededReplications) {
      neededReplications.clear();
    }
    underReplicatedBlocksCount = 0;

    corruptReplicas.clear();
    corruptReplicaBlocksCount = 0;

    overReplicatedBlocks.clear();
    raidEncodingTasks.clear();

    excessReplicateMap = new HashMap<String, LightWeightHashSet<Block>>();
    excessBlocksCount = 0;
  } finally {
    writeUnlock();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 24, Source: FSNamesystem.java

Example 10: scanNamespace

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
void scanNamespace() {
  startNewPeriod();
  // Create a new processedBlocks structure
  processedBlocks = new LightWeightHashSet<Long>();
  if (verificationLog != null) {
    try {
      verificationLog.openCurFile();
    } catch (FileNotFoundException ex) {
      LOG.warn("Could not open current file");
    }
  }
  if (!assignInitialVerificationTimes()) {
    return;
  }
  // Start scanning
  scan();
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 18, Source: DataBlockScanner.java

Example 11: getBlocksBeingWrittenInfo

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
void getBlocksBeingWrittenInfo(LightWeightHashSet<Block> blockSet) throws IOException { 
  if (rbwDir == null) {
    return;
  }
 
  File[] blockFiles = rbwDir.listFiles();
  if (blockFiles == null) {
    return;
  }
  String[] blockFileNames = getFileNames(blockFiles);  
  for (int i = 0; i < blockFiles.length; i++) {
    if (!blockFiles[i].isDirectory()) {
      // get each block in the rbwDir directory
      Block block = FSDataset.getBlockFromNames(blockFiles, blockFileNames, i);
      if (block != null) {
        // add this block to block set
        blockSet.add(block);
        if (DataNode.LOG.isDebugEnabled()) {
          DataNode.LOG.debug("recoverBlocksBeingWritten for block " + block);
        }            
      }
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 25, Source: FSDataset.java

Example 12: getBlockInfo

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Populate the given blockSet with any child blocks
 * found at this node.
 * @throws IOException 
 */
public void getBlockInfo(LightWeightHashSet<Block> blockSet) throws IOException {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);
  
  for (int i = 0; i < blockFiles.length; i++) {
    Block block = getBlockFromNames(blockFiles, blockFilesNames, i);
    if (block != null) {
      blockSet.add(block);
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 24, Source: FSDataset.java

Example 13: getBlockAndFileInfo

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Populate the given blockSet with any child blocks
 * found at this node. With each block, return the full path
 * of the block file.
 * @throws IOException 
 */
void getBlockAndFileInfo(LightWeightHashSet<BlockAndFile> blockSet) throws IOException {
  FSDir[] children = this.getChildren();
  if (children != null) {
    for (int i = 0; i < children.length; i++) {
      children[i].getBlockAndFileInfo(blockSet);
    }
  }

  File blockFiles[] = dir.listFiles();
  String[] blockFilesNames = getFileNames(blockFiles);      
  for (int i = 0; i < blockFiles.length; i++) {
    Block block = getBlockFromNames(blockFiles, blockFilesNames, i);
    if (block != null) {
      blockSet.add(new BlockAndFile(blockFiles[i].getAbsoluteFile(), block));
    }
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 24, Source: FSDataset.java

Example 14: add

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/**
 * Add a block to the block collection
 * which will be invalidated on the specified datanode.
 */
synchronized void add(final Block block, final DatanodeInfo datanode,
    final boolean log) {
  LightWeightHashSet<Block> set = node2blocks.get(datanode.getStorageID());
  if (set == null) {
    set = new LightWeightHashSet<Block>();
    node2blocks.put(datanode.getStorageID(), set);
  }
  if (set.add(block)) {
    numBlocks++;
    if (log) {
      NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
          + ": add " + block + " to " + datanode);
    }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 20, Source: InvalidateBlocks.java

Example 15: dump

import org.apache.hadoop.hdfs.util.LightWeightHashSet; // import the required package/class
/** Print the contents to out. */
synchronized void dump(final PrintWriter out) {
  final int size = node2blocks.values().size();
  out.println("Metasave: Blocks " + numBlocks 
      + " waiting deletion from " + size + " datanodes.");
  if (size == 0) {
    return;
  }

  for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
    final LightWeightHashSet<Block> blocks = entry.getValue();
    if (blocks.size() > 0) {
      out.println(datanodeManager.getDatanode(entry.getKey()));
      out.println(blocks);
    }
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 18, Source: InvalidateBlocks.java


Note: The org.apache.hadoop.hdfs.util.LightWeightHashSet class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community open-source projects; copyright in the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Please do not reproduce this article without permission.