当前位置: 首页>>代码示例>>Java>>正文


Java BlockWithLocations.getDatanodeUuids方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations.getDatanodeUuids方法的典型用法代码示例。如果您正苦于以下问题:Java BlockWithLocations.getDatanodeUuids方法的具体用法?Java BlockWithLocations.getDatanodeUuids怎么用?Java BlockWithLocations.getDatanodeUuids使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations的用法示例。


在下文中一共展示了BlockWithLocations.getDatanodeUuids方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getBlockList

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入方法依赖的package包/类
/**
 * Fetch new blocks of this source from namenode and update this source's
 * block list & {@link Dispatcher#globalBlocks}.
 *
 * @return the total size of the received blocks in the number of bytes.
 * @throws IOException if the namenode RPC fails.
 */
private long getBlockList() throws IOException {
  // Never ask for more bytes of blocks than we still need to move.
  final long fetchSize = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations fetched = nnc.getBlocks(getDatanodeInfo(), fetchSize);

  long totalBytes = 0;
  for (BlockWithLocations located : fetched.getBlocks()) {
    totalBytes += located.getBlock().getNumBytes();
    synchronized (globalBlocks) {
      final DBlock dBlock = globalBlocks.get(located.getBlock());
      synchronized (dBlock) {
        // Rebuild the location set from the freshly reported
        // (datanode uuid, storage type) pairs.
        dBlock.clearLocations();
        final String[] uuids = located.getDatanodeUuids();
        final StorageType[] types = located.getStorageTypes();
        for (int i = 0; i < uuids.length; i++) {
          final StorageGroup group = storageGroupMap.get(uuids[i], types[i]);
          if (group != null) { // skip storage groups unknown to the balancer
            dBlock.addLocation(group);
          }
        }
      }
      // Only keep blocks we have not queued yet and that are worth moving.
      if (!srcBlocks.contains(dBlock) && isGoodBlockCandidate(dBlock)) {
        srcBlocks.add(dBlock);
      }
    }
  }
  return totalBytes;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:38,代码来源:Dispatcher.java

示例2: getBlockList

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入方法依赖的package包/类
/**
 * Fetch new blocks of this source from the namenode and update this
 * source's block list and the shared {@code globalBlockList}.
 *
 * @return the total size of the received blocks in the number of bytes.
 * @throws IOException if the namenode RPC fails.
 */
private long getBlockList() throws IOException {
  BlockWithLocations[] newBlocks = nnc.namenode.getBlocks(datanode, 
    Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive)).getBlocks();
  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks) {
    bytesReceived += blk.getBlock().getNumBytes();
    BalancerBlock block;
    synchronized(globalBlockList) {
      block = globalBlockList.get(blk.getBlock());
      if (block==null) {
        block = new BalancerBlock(blk.getBlock());
        globalBlockList.put(blk.getBlock(), block);
      } else {
        // Already known: drop stale locations before re-adding current ones.
        block.clearLocations();
      }
    
      synchronized (block) {
        // update locations
        for (String datanodeUuid : blk.getDatanodeUuids()) {
          final BalancerDatanode d = datanodeMap.get(datanodeUuid);
          // BUG FIX: the original checked the outer field `datanode` (always
          // non-null here) instead of the map lookup result `d`, so unknown
          // datanodes added a null location to the block.
          if (d != null) { // not an unknown datanode
            block.addLocation(d);
          }
        }
      }
      if (!srcBlockList.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlockList.add(block);
      }
    }
  }
  return bytesReceived;
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:34,代码来源:Balancer.java

示例3: getBlockList

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入方法依赖的package包/类
/**
 * Fetch new blocks of this source from namenode and update this source's
 * block list & {@link Dispatcher#globalBlocks}.
 * 
 * @return the total size of the received blocks in the number of bytes.
 */
private long getBlockList() throws IOException {
  // Never request more bytes of blocks than remain to be received.
  final long size = Math.min(getBlocksSize, blocksToReceive);
  final BlocksWithLocations newBlksLocs =
      nnc.getBlocks(getDatanodeInfo(), size);

  if (LOG.isTraceEnabled()) {
    LOG.trace("getBlocks(" + getDatanodeInfo() + ", "
        + StringUtils.TraditionalBinaryPrefix.long2String(size, "B", 2)
        + ") returns " + newBlksLocs.getBlocks().length + " blocks.");
  }

  long bytesReceived = 0;
  for (BlockWithLocations blkLocs : newBlksLocs.getBlocks()) {
    // Skip small blocks.
    if (blkLocs.getBlock().getNumBytes() < getBlocksMinBlockSize) {
      continue;
    }

    DBlock block;
    if (blkLocs instanceof StripedBlockWithLocations) {
      // Erasure-coded block group: account only the data portion of the
      // group size (parity cells excluded).
      StripedBlockWithLocations sblkLocs =
          (StripedBlockWithLocations) blkLocs;
      // approximate size
      bytesReceived += sblkLocs.getBlock().getNumBytes() /
          sblkLocs.getDataBlockNum();
      block = new DBlockStriped(sblkLocs.getBlock(), sblkLocs.getIndices(),
          sblkLocs.getDataBlockNum(), sblkLocs.getCellSize());
    } else {
      // Replicated block: full block size counts toward bytes received.
      bytesReceived += blkLocs.getBlock().getNumBytes();
      block = new DBlock(blkLocs.getBlock());
    }

    synchronized (globalBlocks) {
      // NOTE(review): this assumes globalBlocks.putIfAbsent returns the
      // mapped DBlock (existing or the one just inserted), unlike
      // java.util.Map.putIfAbsent which returns null on absent keys —
      // confirm against the GlobalBlockMap contract.
      block = globalBlocks.putIfAbsent(blkLocs.getBlock(), block);
      synchronized (block) {
        block.clearLocations();

        // update locations: re-resolve each reported
        // (datanode uuid, storage type) pair to a storage group.
        final String[] datanodeUuids = blkLocs.getDatanodeUuids();
        final StorageType[] storageTypes = blkLocs.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      // Queue the block only once, and only if it is a good move candidate.
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("Add " + block + " to " + this);
        }
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:65,代码来源:Dispatcher.java


注:本文中的org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations.getDatanodeUuids方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。