This article compiles typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations. If you have been wondering what BlockWithLocations is, how it is used, or where to find concrete examples, the curated code samples below should help.
BlockWithLocations is a nested class of org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations. A total of 15 code examples of BlockWithLocations are shown below, sorted by popularity by default.
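For orientation before the individual examples, here is a minimal sketch of how a BlockWithLocations is constructed and wrapped in a BlocksWithLocations. It uses only the constructors and accessors that appear in the examples below; the block ID, datanode UUIDs, and storage IDs are illustrative placeholders, and the import paths assume a recent Hadoop 2.x/3.x layout.

import java.util.Arrays;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;

// Build a single block with three replica locations (placeholder IDs).
Block block = new Block(1L, 0L, 1L); // blockId, numBytes, generationStamp
String[] datanodeUuids = {"dn1", "dn2", "dn3"};
String[] storageIDs = {"s1", "s2", "s3"};
StorageType[] storageTypes = {
    StorageType.DISK, StorageType.DISK, StorageType.DISK};
BlockWithLocations blk =
    new BlockWithLocations(block, datanodeUuids, storageIDs, storageTypes);

// Wrap it in the container type and read it back.
BlocksWithLocations blocks =
    new BlocksWithLocations(new BlockWithLocations[]{blk});
for (BlockWithLocations b : blocks.getBlocks()) {
  System.out.println(b.getBlock() + " on " + Arrays.toString(b.getDatanodeUuids()));
}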
Example 1: addBlock
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
/**
 * Get all valid locations of the block & add the block to results
 * return the length of the added block; 0 if the block is not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if (locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for (int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    results.add(new BlockWithLocations(block, datanodeUuids, storageIDs,
        storageTypes));
    return block.getNumBytes();
  }
}
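In the HDFS code base a helper like this is typically driven by a byte budget: the caller keeps adding blocks until the accumulated length reaches the requested size. Below is a minimal sketch of that calling pattern, assuming an existing block iterator and a requested size; both names are hypothetical and stand in for whatever the surrounding class provides.

// Hypothetical caller: collect blocks until roughly `size` bytes have been gathered.
List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
long totalSize = 0;
while (totalSize < size && blockIterator.hasNext()) {
  // addBlock() returns 0 for blocks with no valid locations, so they do not count.
  totalSize += addBlock(blockIterator.next(), results);
}
return new BlocksWithLocations(
    results.toArray(new BlockWithLocations[results.size()]));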
Example 2: getBlockWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
private static BlockWithLocations getBlockWithLocations(
    int bid, boolean isStriped) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  final byte[] indices = {0, 1, 2};
  final short dataBlkNum = 6;
  BlockWithLocations blkLocs = new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
  if (isStriped) {
    blkLocs = new StripedBlockWithLocations(blkLocs, indices, dataBlkNum,
        StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE);
  }
  return blkLocs;
}
Example 3: testConvertBlocksWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
@Test
public void testConvertBlocksWithLocations() {
  boolean[] testSuite = new boolean[]{false, true};
  for (int i = 0; i < testSuite.length; i++) {
    BlockWithLocations[] list = new BlockWithLocations[]{
        getBlockWithLocations(1, testSuite[i]),
        getBlockWithLocations(2, testSuite[i])};
    BlocksWithLocations locs = new BlocksWithLocations(list);
    BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
    BlocksWithLocations locs2 = PBHelper.convert(locsProto);
    BlockWithLocations[] blocks = locs.getBlocks();
    BlockWithLocations[] blocks2 = locs2.getBlocks();
    assertEquals(blocks.length, blocks2.length);
    for (int j = 0; j < blocks.length; j++) {
      compare(blocks[j], blocks2[j]);
    }
  }
}
Example 4: addBlock
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
/**
 * Get all valid locations of the block & add the block to results
 * return the length of the added block; 0 if the block is not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
      new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it =
      blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter invalidate replicas
    LightWeightHashSet<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block,
        machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Example 5: addBlock
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
/**
 * Get all valid locations of the block & add the block to results
 * return the length of the added block; 0 if the block is not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet =
      new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it =
      blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // filter invalidate replicas
    Collection<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block,
        machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
Example 6: addBlock
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
/**
 * Get all valid locations of the block & add the block to results
 * return the length of the added block; 0 if the block is not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if (locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    for (int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
    }
    results.add(new BlockWithLocations(block, datanodeUuids, storageIDs));
    return block.getNumBytes();
  }
}
Example 7: getBlockList
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
/**
 * Fetch new blocks of this source from namenode and update this source's
 * block list & {@link Dispatcher#globalBlocks}.
 *
 * @return the total size of the received blocks in the number of bytes.
 */
private long getBlockList() throws IOException {
  final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);
  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks.getBlocks()) {
    bytesReceived += blk.getBlock().getNumBytes();
    synchronized (globalBlocks) {
      final DBlock block = globalBlocks.get(blk.getBlock());
      synchronized (block) {
        block.clearLocations();
        // update locations
        final String[] datanodeUuids = blk.getDatanodeUuids();
        final StorageType[] storageTypes = blk.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
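Note the locking in this example: the shared globalBlocks map is held while the DBlock is looked up and considered as a source candidate, and the DBlock itself is locked while its location list is cleared and rebuilt, presumably because other dispatcher threads may read or update the same DBlock concurrently.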
Example 8: convert
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
  return BlockWithLocationsProto.newBuilder()
      .setBlock(convert(blk.getBlock()))
      .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
      .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
      .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()))
      .build();
}
Example 9: getBlockWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
private static BlockWithLocations getBlockWithLocations(int bid) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes = {
      StorageType.DISK, StorageType.DISK, StorageType.DISK};
  return new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
}
Example 10: testConvertBlockWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
@Test
public void testConvertBlockWithLocations() {
  BlockWithLocations locs = getBlockWithLocations(1);
  BlockWithLocationsProto locsProto = PBHelper.convert(locs);
  BlockWithLocations locs2 = PBHelper.convert(locsProto);
  compare(locs, locs2);
}
Example 11: testConvertBlocksWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations[] list = new BlockWithLocations[] {
      getBlockWithLocations(1), getBlockWithLocations(2) };
  BlocksWithLocations locs = new BlocksWithLocations(list);
  BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
  BlocksWithLocations locs2 = PBHelper.convert(locsProto);
  BlockWithLocations[] blocks = locs.getBlocks();
  BlockWithLocations[] blocks2 = locs2.getBlocks();
  assertEquals(blocks.length, blocks2.length);
  for (int i = 0; i < blocks.length; i++) {
    compare(blocks[i], blocks2[i]);
  }
}
Example 12: addBlock
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
/**
 * Get all valid locations of the block & add the block to results
 * @return the length of the added block; 0 if the block is not added. If the
 *         added block is a block group, return its approximate internal block size
 */
private long addBlock(BlockInfo block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if (locations.size() == 0) {
    return 0;
  } else {
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for (int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    BlockWithLocations blkWithLocs = new BlockWithLocations(block,
        datanodeUuids, storageIDs, storageTypes);
    if (block.isStriped()) {
      BlockInfoStriped blockStriped = (BlockInfoStriped) block;
      byte[] indices = new byte[locations.size()];
      for (int i = 0; i < locations.size(); i++) {
        indices[i] =
            (byte) blockStriped.getStorageBlockIndex(locations.get(i));
      }
      results.add(new StripedBlockWithLocations(blkWithLocs, indices,
          blockStriped.getDataBlockNum(), blockStriped.getCellSize()));
      // approximate size
      return block.getNumBytes() / blockStriped.getDataBlockNum();
    } else {
      results.add(blkWithLocs);
      return block.getNumBytes();
    }
  }
}
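Note the return value in the striped branch: dividing the block group's length by the number of data blocks approximates the size of a single internal block. For example, a 384 MB block group striped across 6 data blocks would be reported as roughly 64 MB, which is closer to the amount of data one internal block actually holds than the full group size would be.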
Example 13: convert
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
  BlockWithLocationsProto.Builder builder = BlockWithLocationsProto
      .newBuilder().setBlock(PBHelperClient.convert(blk.getBlock()))
      .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
      .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
      .addAllStorageTypes(PBHelperClient.convertStorageTypes(blk.getStorageTypes()));
  if (blk instanceof StripedBlockWithLocations) {
    StripedBlockWithLocations sblk = (StripedBlockWithLocations) blk;
    builder.setIndices(PBHelperClient.getByteString(sblk.getIndices()));
    builder.setDataBlockNum(sblk.getDataBlockNum());
    builder.setCellSize(sblk.getCellSize());
  }
  return builder.build();
}
Example 14: compare
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {
  assertEquals(locs1.getBlock(), locs2.getBlock());
  assertTrue(Arrays.equals(locs1.getStorageIDs(), locs2.getStorageIDs()));
  if (locs1 instanceof StripedBlockWithLocations) {
    assertTrue(Arrays.equals(((StripedBlockWithLocations) locs1).getIndices(),
        ((StripedBlockWithLocations) locs2).getIndices()));
  }
}
Example 15: testConvertBlockWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; // import the required package/class
@Test
public void testConvertBlockWithLocations() {
  boolean[] testSuite = new boolean[]{false, true};
  for (int i = 0; i < testSuite.length; i++) {
    BlockWithLocations locs = getBlockWithLocations(1, testSuite[i]);
    BlockWithLocationsProto locsProto = PBHelper.convert(locs);
    BlockWithLocations locs2 = PBHelper.convert(locsProto);
    compare(locs, locs2);
  }
}