当前位置: 首页>>代码示例>>Java>>正文


Java BlocksWithLocations.getBlocks方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.getBlocks方法的典型用法代码示例。如果您正苦于以下问题:Java BlocksWithLocations.getBlocks方法的具体用法?Java BlocksWithLocations.getBlocks怎么用?Java BlocksWithLocations.getBlocks使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations的用法示例。


在下文中一共展示了BlocksWithLocations.getBlocks方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testConvertBlocksWithLocations

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; //导入方法依赖的package包/类
/**
 * Verifies that {@link BlocksWithLocations} survives a protobuf round trip
 * for both contiguous and striped block lists.
 */
@Test
public void testConvertBlocksWithLocations() {
  // Exercise both code paths: contiguous (false) and striped (true) blocks.
  for (boolean striped : new boolean[] {false, true}) {
    BlockWithLocations[] input = new BlockWithLocations[] {
        getBlockWithLocations(1, striped),
        getBlockWithLocations(2, striped)};
    BlocksWithLocations original = new BlocksWithLocations(input);
    // Convert to protobuf and back again.
    BlocksWithLocationsProto proto = PBHelper.convert(original);
    BlocksWithLocations restored = PBHelper.convert(proto);
    BlockWithLocations[] expected = original.getBlocks();
    BlockWithLocations[] actual = restored.getBlocks();
    assertEquals(expected.length, actual.length);
    for (int k = 0; k < expected.length; k++) {
      compare(expected[k], actual[k]);
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:19,代码来源:TestPBHelper.java

示例2: getBlockList

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; //导入方法依赖的package包/类
/**
 * Fetch new blocks of this source from namenode and update this source's
 * block list & {@link Dispatcher#globalBlocks}.
 * 
 * @return the total size of the received blocks in the number of bytes.
 * @throws IOException if the namenode RPC fails.
 */
private long getBlockList() throws IOException {
  // Cap the request so we never fetch more bytes of blocks than remain to move.
  final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);

  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks.getBlocks()) {
    bytesReceived += blk.getBlock().getNumBytes();
    // Lock order: globalBlocks first, then the individual block.
    synchronized (globalBlocks) {
      final DBlock block = globalBlocks.get(blk.getBlock());
      synchronized (block) {
        // Discard stale locations before applying the fresh set from the namenode.
        block.clearLocations();

        // update locations
        final String[] datanodeUuids = blk.getDatanodeUuids();
        final StorageType[] storageTypes = blk.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          // Resolve each (datanode, storage type) pair to a known storage group.
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:38,代码来源:Dispatcher.java

示例3: convert

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; //导入方法依赖的package包/类
/**
 * Converts a {@link BlocksWithLocations} into its protobuf representation.
 *
 * @param blks the block list to convert; each element is converted individually
 * @return the populated {@link BlocksWithLocationsProto} message
 */
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  final BlocksWithLocationsProto.Builder b =
      BlocksWithLocationsProto.newBuilder();
  for (BlockWithLocations each : blks.getBlocks()) {
    b.addBlocks(convert(each));
  }
  return b.build();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:PBHelper.java

示例4: testConvertBlocksWithLocations

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; //导入方法依赖的package包/类
/**
 * Checks that converting a {@link BlocksWithLocations} to protobuf and back
 * yields an equivalent block list.
 */
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations[] input =
      {getBlockWithLocations(1), getBlockWithLocations(2)};
  BlocksWithLocations original = new BlocksWithLocations(input);
  // Round-trip through the protobuf form in one expression.
  BlocksWithLocations restored = PBHelper.convert(PBHelper.convert(original));
  BlockWithLocations[] expected = original.getBlocks();
  BlockWithLocations[] actual = restored.getBlocks();
  assertEquals(expected.length, actual.length);
  for (int k = 0; k < expected.length; k++) {
    compare(expected[k], actual[k]);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:TestPBHelper.java

示例5: convert

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; //导入方法依赖的package包/类
/**
 * Builds the protobuf message corresponding to the given
 * {@link BlocksWithLocations}.
 *
 * @param blks the source block list
 * @return the serialized-form builder result
 */
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  BlocksWithLocationsProto.Builder out = BlocksWithLocationsProto.newBuilder();
  BlockWithLocations[] all = blks.getBlocks();
  for (int i = 0; i < all.length; i++) {
    out.addBlocks(convert(all[i]));
  }
  return out.build();
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:9,代码来源:PBHelper.java

示例6: testConvertBlocksWithLocations

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; //导入方法依赖的package包/类
/**
 * Asserts that a protobuf round trip of {@link BlocksWithLocations}
 * preserves every contained block.
 */
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations first = getBlockWithLocations(1);
  BlockWithLocations second = getBlockWithLocations(2);
  BlocksWithLocations original =
      new BlocksWithLocations(new BlockWithLocations[] {first, second});
  BlocksWithLocationsProto asProto = PBHelper.convert(original);
  BlocksWithLocations decoded = PBHelper.convert(asProto);
  BlockWithLocations[] before = original.getBlocks();
  BlockWithLocations[] after = decoded.getBlocks();
  assertEquals(before.length, after.length);
  for (int idx = 0; idx < before.length; idx++) {
    compare(before[idx], after[idx]);
  }
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:16,代码来源:TestPBHelper.java

示例7: getBlockList

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; //导入方法依赖的package包/类
/**
 * Fetch new blocks of this source from namenode and update this source's
 * block list & {@link Dispatcher#globalBlocks}.
 * 
 * @return the total size of the received blocks in the number of bytes.
 * @throws IOException if the namenode RPC fails.
 */
private long getBlockList() throws IOException {
  // Cap the request so we never fetch more bytes of blocks than remain to move.
  final long size = Math.min(getBlocksSize, blocksToReceive);
  final BlocksWithLocations newBlksLocs =
      nnc.getBlocks(getDatanodeInfo(), size);

  if (LOG.isTraceEnabled()) {
    LOG.trace("getBlocks(" + getDatanodeInfo() + ", "
        + StringUtils.TraditionalBinaryPrefix.long2String(size, "B", 2)
        + ") returns " + newBlksLocs.getBlocks().length + " blocks.");
  }

  long bytesReceived = 0;
  for (BlockWithLocations blkLocs : newBlksLocs.getBlocks()) {
    // Skip small blocks.
    if (blkLocs.getBlock().getNumBytes() < getBlocksMinBlockSize) {
      continue;
    }

    DBlock block;
    if (blkLocs instanceof StripedBlockWithLocations) {
      StripedBlockWithLocations sblkLocs =
          (StripedBlockWithLocations) blkLocs;
      // approximate size
      bytesReceived += sblkLocs.getBlock().getNumBytes() /
          sblkLocs.getDataBlockNum();
      block = new DBlockStriped(sblkLocs.getBlock(), sblkLocs.getIndices(),
          sblkLocs.getDataBlockNum(), sblkLocs.getCellSize());
    } else {
      bytesReceived += blkLocs.getBlock().getNumBytes();
      block = new DBlock(blkLocs.getBlock());
    }

    // Lock order: globalBlocks first, then the individual block.
    synchronized (globalBlocks) {
      // NOTE(review): this reassigns `block` from putIfAbsent's return value and
      // then locks it, so this map appears to return the mapped DBlock (existing
      // or newly inserted) rather than null on insert like java.util.Map —
      // verify against the GlobalBlockMap implementation.
      block = globalBlocks.putIfAbsent(blkLocs.getBlock(), block);
      synchronized (block) {
        // Discard stale locations before applying the fresh set from the namenode.
        block.clearLocations();

        // update locations
        final String[] datanodeUuids = blkLocs.getDatanodeUuids();
        final StorageType[] storageTypes = blkLocs.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          // Resolve each (datanode, storage type) pair to a known storage group.
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("Add " + block + " to " + this);
        }
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:65,代码来源:Dispatcher.java


注:本文中的org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.getBlocks方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。