当前位置: 首页>>代码示例>>Java>>正文


Java BlockWithLocations类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations的典型用法代码示例。如果您正苦于以下问题:Java BlockWithLocations类的具体用法?Java BlockWithLocations怎么用?Java BlockWithLocations使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


BlockWithLocations类属于org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations包,在下文中一共展示了BlockWithLocations类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: addBlock

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Collect every valid location of {@code block} and, when at least one
 * exists, append the block together with its location arrays to
 * {@code results}.
 *
 * @param block   the block whose replicas are being gathered
 * @param results output list the block is appended to when it has locations
 * @return the number of bytes of the added block; 0 if the block was not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> validStorages = getValidLocations(block);
  final int count = validStorages.size();
  if (count == 0) {
    return 0;
  }
  final String[] nodeUuids = new String[count];
  final String[] storageIds = new String[count];
  final StorageType[] types = new StorageType[count];
  int idx = 0;
  for (DatanodeStorageInfo storage : validStorages) {
    nodeUuids[idx] = storage.getDatanodeDescriptor().getDatanodeUuid();
    storageIds[idx] = storage.getStorageID();
    types[idx] = storage.getStorageType();
    idx++;
  }
  results.add(new BlockWithLocations(block, nodeUuids, storageIds, types));
  return block.getNumBytes();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:BlockManager.java

示例2: getBlockWithLocations

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Build a deterministic test fixture: a block with id {@code bid} located on
 * three DISK storages ("dn1".."dn3" / "s1".."s3"). When {@code isStriped} is
 * set, the fixture is wrapped as a striped block with indices {0, 1, 2} and
 * 6 data blocks.
 */
private static BlockWithLocations getBlockWithLocations(
    int bid, boolean isStriped) {
  final String[] nodes = {"dn1", "dn2", "dn3"};
  final String[] storages = {"s1", "s2", "s3"};
  final StorageType[] types = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  final BlockWithLocations plain =
      new BlockWithLocations(new Block(bid, 0, 1), nodes, storages, types);
  if (!isStriped) {
    return plain;
  }
  final byte[] indices = {0, 1, 2};
  final short dataBlkNum = 6;
  return new StripedBlockWithLocations(plain, indices, dataBlkNum,
      StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:17,代码来源:TestPBHelper.java

示例3: testConvertBlocksWithLocations

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
@Test
public void testConvertBlocksWithLocations() {
  // Round-trip a two-block list through the protobuf conversion, once for
  // contiguous and once for striped blocks, and verify nothing is lost.
  for (boolean striped : new boolean[] {false, true}) {
    BlockWithLocations[] expected = new BlockWithLocations[] {
        getBlockWithLocations(1, striped),
        getBlockWithLocations(2, striped)};
    BlocksWithLocations roundTripped =
        PBHelper.convert(PBHelper.convert(new BlocksWithLocations(expected)));
    BlockWithLocations[] actual = roundTripped.getBlocks();
    assertEquals(expected.length, actual.length);
    for (int j = 0; j < expected.length; j++) {
      compare(expected[j], actual[j]);
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:19,代码来源:TestPBHelper.java

示例4: addBlock

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Collect the storage IDs of every replica of {@code block} that is not
 * pending invalidation and, when any remain, append the block with those
 * locations to {@code results}.
 *
 * @param block   the block whose live replicas are being gathered
 * @param results output list the block is appended to when it has locations
 * @return the number of bytes of the added block; 0 if the block was not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> liveStorages =
    new ArrayList<String>(blocksMap.numNodes(block));
  Iterator<DatanodeDescriptor> nodes = blocksMap.nodeIterator(block);
  while (nodes.hasNext()) {
    String storageID = nodes.next().getStorageID();
    // skip replicas that are queued for invalidation on this storage
    LightWeightHashSet<Block> pending = recentInvalidateSets.get(storageID);
    if (pending == null || !pending.contains(block)) {
      liveStorages.add(storageID);
    }
  }
  if (liveStorages.isEmpty()) {
    return 0;
  }
  results.add(new BlockWithLocations(block,
    liveStorages.toArray(new String[liveStorages.size()])));
  return block.getNumBytes();
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:25,代码来源:FSNamesystem.java

示例5: addBlock

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Collect the storage IDs of every replica of {@code block} that is not
 * pending invalidation and, when any remain, append the block with those
 * locations to {@code results}.
 *
 * @param block   the block whose live replicas are being gathered
 * @param results output list the block is appended to when it has locations
 * @return the number of bytes of the added block; 0 if the block was not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> liveStorages =
    new ArrayList<String>(blocksMap.numNodes(block));
  Iterator<DatanodeDescriptor> nodes = blocksMap.nodeIterator(block);
  while (nodes.hasNext()) {
    String storageID = nodes.next().getStorageID();
    // skip replicas that are queued for invalidation on this storage
    Collection<Block> pending = recentInvalidateSets.get(storageID);
    if (pending == null || !pending.contains(block)) {
      liveStorages.add(storageID);
    }
  }
  if (liveStorages.isEmpty()) {
    return 0;
  }
  results.add(new BlockWithLocations(block,
      liveStorages.toArray(new String[liveStorages.size()])));
  return block.getNumBytes();
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre,代码行数:25,代码来源:FSNamesystem.java

示例6: addBlock

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Collect every valid location of {@code block} and, when at least one
 * exists, append the block together with its datanode UUID and storage ID
 * arrays to {@code results}.
 *
 * @param block   the block whose replicas are being gathered
 * @param results output list the block is appended to when it has locations
 * @return the number of bytes of the added block; 0 if the block was not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> storages = getValidLocations(block);
  final int count = storages.size();
  if (count == 0) {
    return 0;
  }
  final String[] nodeUuids = new String[count];
  final String[] storageIds = new String[count];
  for (int i = 0; i < count; i++) {
    final DatanodeStorageInfo storage = storages.get(i);
    nodeUuids[i] = storage.getDatanodeDescriptor().getDatanodeUuid();
    storageIds[i] = storage.getStorageID();
  }
  results.add(new BlockWithLocations(block, nodeUuids, storageIds));
  return block.getNumBytes();
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:21,代码来源:BlockManager.java

示例7: getBlockList

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Fetch new blocks of this source from namenode and update this source's
 * block list & {@link Dispatcher#globalBlocks}.
 * 
 * @return the total size of the received blocks in the number of bytes.
 */
private long getBlockList() throws IOException {
  // Never ask the namenode for more block bytes than this source still
  // needs to receive.
  final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);

  // NOTE(review): bytesReceived counts every block returned by the
  // namenode, including ones later filtered out of srcBlocks.
  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks.getBlocks()) {
    bytesReceived += blk.getBlock().getNumBytes();
    // globalBlocks is shared state; hold its lock while resolving the
    // DBlock, and the per-block lock while replacing its location set.
    synchronized (globalBlocks) {
      final DBlock block = globalBlocks.get(blk.getBlock());
      synchronized (block) {
        block.clearLocations();

        // update locations: re-resolve each (datanodeUuid, storageType)
        // pair against the currently known storage groups
        final String[] datanodeUuids = blk.getDatanodeUuids();
        final StorageType[] storageTypes = blk.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:38,代码来源:Dispatcher.java

示例8: convert

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Translate a {@link BlockWithLocations} into its protobuf wire form,
 * copying the block, datanode UUIDs, storage IDs, and storage types.
 */
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
  final BlockWithLocationsProto.Builder builder =
      BlockWithLocationsProto.newBuilder();
  builder.setBlock(convert(blk.getBlock()));
  builder.addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()));
  builder.addAllStorageUuids(Arrays.asList(blk.getStorageIDs()));
  builder.addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()));
  return builder.build();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:PBHelper.java

示例9: getBlockWithLocations

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Build a deterministic test fixture: a block with id {@code bid} located on
 * three DISK storages ("dn1".."dn3" / "s1".."s3").
 */
private static BlockWithLocations getBlockWithLocations(int bid) {
  final Block block = new Block(bid, 0, 1);
  return new BlockWithLocations(block,
      new String[] {"dn1", "dn2", "dn3"},
      new String[] {"s1", "s2", "s3"},
      new StorageType[] {
          StorageType.DISK, StorageType.DISK, StorageType.DISK});
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:TestPBHelper.java

示例10: testConvertBlockWithLocations

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
@Test
public void testConvertBlockWithLocations() {
  // Round-trip a single block through the protobuf conversion and verify
  // nothing is lost.
  BlockWithLocations original = getBlockWithLocations(1);
  BlockWithLocationsProto proto = PBHelper.convert(original);
  compare(original, PBHelper.convert(proto));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:8,代码来源:TestPBHelper.java

示例11: testConvertBlocksWithLocations

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
@Test
public void testConvertBlocksWithLocations() {
  // Round-trip a two-block list through the protobuf conversion and verify
  // nothing is lost.
  BlocksWithLocations original = new BlocksWithLocations(
      new BlockWithLocations[] {
          getBlockWithLocations(1), getBlockWithLocations(2)});
  BlocksWithLocations roundTripped =
      PBHelper.convert(PBHelper.convert(original));
  BlockWithLocations[] expected = original.getBlocks();
  BlockWithLocations[] actual = roundTripped.getBlocks();
  assertEquals(expected.length, actual.length);
  for (int i = 0; i < expected.length; i++) {
    compare(expected[i], actual[i]);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:TestPBHelper.java

示例12: addBlock

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Get all valid locations of the block & add the block to results
 * @return the length of the added block; 0 if the block is not added. If the
 * added block is a block group, return its approximate internal block size
 */
private long addBlock(BlockInfo block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> storages = getValidLocations(block);
  final int count = storages.size();
  if (count == 0) {
    return 0;
  }
  final String[] nodeUuids = new String[count];
  final String[] storageIds = new String[count];
  final StorageType[] types = new StorageType[count];
  for (int i = 0; i < count; i++) {
    final DatanodeStorageInfo storage = storages.get(i);
    nodeUuids[i] = storage.getDatanodeDescriptor().getDatanodeUuid();
    storageIds[i] = storage.getStorageID();
    types[i] = storage.getStorageType();
  }
  final BlockWithLocations blkWithLocs =
      new BlockWithLocations(block, nodeUuids, storageIds, types);
  if (!block.isStriped()) {
    results.add(blkWithLocs);
    return block.getNumBytes();
  }
  // Striped block group: also record each location's internal block index
  // and the erasure-coding layout.
  final BlockInfoStriped striped = (BlockInfoStriped) block;
  final byte[] indices = new byte[count];
  for (int i = 0; i < count; i++) {
    indices[i] = (byte) striped.getStorageBlockIndex(storages.get(i));
  }
  results.add(new StripedBlockWithLocations(blkWithLocs, indices,
      striped.getDataBlockNum(), striped.getCellSize()));
  // approximate size of one internal block of the group
  return block.getNumBytes() / striped.getDataBlockNum();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:39,代码来源:BlockManager.java

示例13: convert

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Translate a {@link BlockWithLocations} into its protobuf wire form. For a
 * {@link StripedBlockWithLocations}, the per-location block indices and the
 * erasure-coding layout (data block count, cell size) are also carried over.
 */
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
  final BlockWithLocationsProto.Builder builder =
      BlockWithLocationsProto.newBuilder();
  builder.setBlock(PBHelperClient.convert(blk.getBlock()));
  builder.addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()));
  builder.addAllStorageUuids(Arrays.asList(blk.getStorageIDs()));
  builder.addAllStorageTypes(
      PBHelperClient.convertStorageTypes(blk.getStorageTypes()));
  if (blk instanceof StripedBlockWithLocations) {
    final StripedBlockWithLocations sblk = (StripedBlockWithLocations) blk;
    builder.setIndices(PBHelperClient.getByteString(sblk.getIndices()));
    builder.setDataBlockNum(sblk.getDataBlockNum());
    builder.setCellSize(sblk.getCellSize());
  }
  return builder.build();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:15,代码来源:PBHelper.java

示例14: compare

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
/**
 * Assert that two {@link BlockWithLocations} carry identical block and
 * location information: block identity, datanode UUIDs, storage IDs,
 * storage types, and — for striped blocks — the per-location indices and
 * erasure-coding layout. The original check omitted datanode UUIDs, storage
 * types, and the striped layout, so a conversion that dropped those fields
 * would still pass.
 */
private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
  assertEquals(locs1.getBlock(), locs2.getBlock());
  assertTrue(Arrays.equals(locs1.getDatanodeUuids(), locs2.getDatanodeUuids()));
  assertTrue(Arrays.equals(locs1.getStorageIDs(), locs2.getStorageIDs()));
  assertTrue(Arrays.equals(locs1.getStorageTypes(), locs2.getStorageTypes()));
  if (locs1 instanceof StripedBlockWithLocations) {
    StripedBlockWithLocations s1 = (StripedBlockWithLocations) locs1;
    StripedBlockWithLocations s2 = (StripedBlockWithLocations) locs2;
    assertTrue(Arrays.equals(s1.getIndices(), s2.getIndices()));
    assertEquals(s1.getDataBlockNum(), s2.getDataBlockNum());
    assertEquals(s1.getCellSize(), s2.getCellSize());
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:9,代码来源:TestPBHelper.java

示例15: testConvertBlockWithLocations

import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; //导入依赖的package包/类
@Test
public void testConvertBlockWithLocations() {
  // Round-trip a single block through the protobuf conversion, once for
  // contiguous and once for striped, and verify nothing is lost.
  for (boolean striped : new boolean[] {false, true}) {
    BlockWithLocations original = getBlockWithLocations(1, striped);
    BlockWithLocationsProto proto = PBHelper.convert(original);
    compare(original, PBHelper.convert(proto));
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:11,代码来源:TestPBHelper.java


注:本文中的org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。