This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.getBlocks. If you have been wondering what exactly BlocksWithLocations.getBlocks does, how to use it, or where to find real examples of it, the curated method examples below may help. You can also explore the enclosing class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations for further usage examples.
The following presents 7 code examples of the BlocksWithLocations.getBlocks method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
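Before the examples, a minimal, self-contained sketch of what the method exposes may help: getBlocks() simply returns the BlockWithLocations[] array that the BlocksWithLocations wrapper carries. This sketch is illustrative only; the four-argument BlockWithLocations constructor matches Hadoop 2.7-era sources, and the UUID and storage-ID strings are made-up values.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;

public class GetBlocksSketch {
  public static void main(String[] args) {
    // One block (id, length, generation stamp) with a single replica location.
    BlockWithLocations blk = new BlockWithLocations(
        new Block(1L, 1024L, 100L),
        new String[] { "dn-uuid-1" },        // datanode UUIDs (made up)
        new String[] { "storage-id-1" },     // storage IDs (made up)
        new StorageType[] { StorageType.DISK });
    BlocksWithLocations locs =
        new BlocksWithLocations(new BlockWithLocations[] { blk });

    // getBlocks() hands back the wrapped array; iterate it as the examples below do.
    for (BlockWithLocations b : locs.getBlocks()) {
      System.out.println(b.getBlock() + " @ "
          + String.join(",", b.getDatanodeUuids()));
    }
  }
}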
Example 1: testConvertBlocksWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the package/class the method depends on
@Test
public void testConvertBlocksWithLocations() {
  boolean[] testSuite = new boolean[]{false, true};
  for (int i = 0; i < testSuite.length; i++) {
    BlockWithLocations[] list = new BlockWithLocations[]{
        getBlockWithLocations(1, testSuite[i]),
        getBlockWithLocations(2, testSuite[i])};
    BlocksWithLocations locs = new BlocksWithLocations(list);
    // Round-trip through the protobuf form and verify the blocks survive.
    BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
    BlocksWithLocations locs2 = PBHelper.convert(locsProto);
    BlockWithLocations[] blocks = locs.getBlocks();
    BlockWithLocations[] blocks2 = locs2.getBlocks();
    assertEquals(blocks.length, blocks2.length);
    for (int j = 0; j < blocks.length; j++) {
      compare(blocks[j], blocks2[j]);
    }
  }
}
Example 2: getBlockList
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the package/class the method depends on
/**
 * Fetch new blocks of this source from the namenode and update this
 * source's block list & {@link Dispatcher#globalBlocks}.
 *
 * @return the total size of the received blocks, in bytes.
 */
private long getBlockList() throws IOException {
  final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);
  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks.getBlocks()) {
    bytesReceived += blk.getBlock().getNumBytes();
    synchronized (globalBlocks) {
      final DBlock block = globalBlocks.get(blk.getBlock());
      synchronized (block) {
        block.clearLocations();
        // update locations
        final String[] datanodeUuids = blk.getDatanodeUuids();
        final StorageType[] storageTypes = blk.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
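A note on the contract shown above: getBlockList() returns the number of bytes' worth of blocks received, and the caller subtracts that from its remaining budget (blocksToReceive) before fetching again. The following stand-alone sketch mirrors that paging loop; fetchBatch() and both constants are hypothetical stand-ins, not Hadoop APIs.

public class FetchLoopSketch {
  // Per-call cap, standing in for MAX_BLOCKS_SIZE_TO_FETCH (value is made up).
  static final long MAX_BATCH = 2L * 1024 * 1024 * 1024;

  // Stand-in for nnc.getBlocks(...) plus the bookkeeping in getBlockList():
  // pretend each call yields up to 512 MB worth of blocks.
  static long fetchBatch(long size) {
    return Math.min(size, 512L * 1024 * 1024);
  }

  public static void main(String[] args) {
    long blocksToReceive = 5L * 1024 * 1024 * 1024; // total byte budget (made up)
    while (blocksToReceive > 0) {
      long received = fetchBatch(Math.min(MAX_BATCH, blocksToReceive));
      if (received == 0) {
        break; // no more candidate blocks available
      }
      blocksToReceive -= received;
    }
  }
}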
Example 3: convert
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the package/class the method depends on
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  BlocksWithLocationsProto.Builder builder =
      BlocksWithLocationsProto.newBuilder();
  for (BlockWithLocations b : blks.getBlocks()) {
    builder.addBlocks(convert(b));
  }
  return builder.build();
}
Example 4: testConvertBlocksWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the package/class the method depends on
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations[] list = new BlockWithLocations[] {
      getBlockWithLocations(1), getBlockWithLocations(2) };
  BlocksWithLocations locs = new BlocksWithLocations(list);
  BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
  BlocksWithLocations locs2 = PBHelper.convert(locsProto);
  BlockWithLocations[] blocks = locs.getBlocks();
  BlockWithLocations[] blocks2 = locs2.getBlocks();
  assertEquals(blocks.length, blocks2.length);
  for (int i = 0; i < blocks.length; i++) {
    compare(blocks[i], blocks2[i]);
  }
}
Example 5: convert
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the package/class the method depends on
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  BlocksWithLocationsProto.Builder builder =
      BlocksWithLocationsProto.newBuilder();
  for (BlockWithLocations b : blks.getBlocks()) {
    builder.addBlocks(convert(b));
  }
  return builder.build();
}
Example 6: testConvertBlocksWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the package/class the method depends on
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations[] list =
      new BlockWithLocations[]{getBlockWithLocations(1),
          getBlockWithLocations(2)};
  BlocksWithLocations locs = new BlocksWithLocations(list);
  BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
  BlocksWithLocations locs2 = PBHelper.convert(locsProto);
  BlockWithLocations[] blocks = locs.getBlocks();
  BlockWithLocations[] blocks2 = locs2.getBlocks();
  assertEquals(blocks.length, blocks2.length);
  for (int i = 0; i < blocks.length; i++) {
    compare(blocks[i], blocks2[i]);
  }
}
Example 7: getBlockList
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the package/class the method depends on
/**
 * Fetch new blocks of this source from the namenode and update this
 * source's block list & {@link Dispatcher#globalBlocks}.
 *
 * @return the total size of the received blocks, in bytes.
 */
private long getBlockList() throws IOException {
  final long size = Math.min(getBlocksSize, blocksToReceive);
  final BlocksWithLocations newBlksLocs =
      nnc.getBlocks(getDatanodeInfo(), size);
  if (LOG.isTraceEnabled()) {
    LOG.trace("getBlocks(" + getDatanodeInfo() + ", "
        + StringUtils.TraditionalBinaryPrefix.long2String(size, "B", 2)
        + ") returns " + newBlksLocs.getBlocks().length + " blocks.");
  }
  long bytesReceived = 0;
  for (BlockWithLocations blkLocs : newBlksLocs.getBlocks()) {
    // Skip small blocks.
    if (blkLocs.getBlock().getNumBytes() < getBlocksMinBlockSize) {
      continue;
    }
    DBlock block;
    if (blkLocs instanceof StripedBlockWithLocations) {
      StripedBlockWithLocations sblkLocs =
          (StripedBlockWithLocations) blkLocs;
      // approximate size
      bytesReceived += sblkLocs.getBlock().getNumBytes() /
          sblkLocs.getDataBlockNum();
      block = new DBlockStriped(sblkLocs.getBlock(), sblkLocs.getIndices(),
          sblkLocs.getDataBlockNum(), sblkLocs.getCellSize());
    } else {
      bytesReceived += blkLocs.getBlock().getNumBytes();
      block = new DBlock(blkLocs.getBlock());
    }
    synchronized (globalBlocks) {
      block = globalBlocks.putIfAbsent(blkLocs.getBlock(), block);
      synchronized (block) {
        block.clearLocations();
        // update locations
        final String[] datanodeUuids = blkLocs.getDatanodeUuids();
        final StorageType[] storageTypes = blkLocs.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("Add " + block + " to " + this);
        }
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
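Compared with Example 2, Example 7 also handles erasure-coded blocks: for a StripedBlockWithLocations, the block group's byte count is divided by the number of data blocks to approximate the size moved per storage. A tiny stand-alone illustration with made-up RS(6,3)-style numbers:

public class StripedSizeSketch {
  public static void main(String[] args) {
    long blockGroupBytes = 384L * 1024 * 1024; // numBytes of the whole group (made up)
    int dataBlockNum = 6;                      // data blocks per group (made up)
    // Mirrors "bytesReceived += numBytes / dataBlockNum" in Example 7.
    System.out.println(blockGroupBytes / dataBlockNum); // prints 67108864 (64 MB)
  }
}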