This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations. If you are wondering what BlocksWithLocations does, how to use it, or where to find examples of it in use, the curated class code examples below may help.
BlocksWithLocations belongs to the org.apache.hadoop.hdfs.server.protocol package. 13 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: testConvertBlocksWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
@Test
public void testConvertBlocksWithLocations() {
  boolean[] testSuite = new boolean[]{false, true};
  for (int i = 0; i < testSuite.length; i++) {
    BlockWithLocations[] list = new BlockWithLocations[]{
        getBlockWithLocations(1, testSuite[i]),
        getBlockWithLocations(2, testSuite[i])};
    BlocksWithLocations locs = new BlocksWithLocations(list);
    BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
    BlocksWithLocations locs2 = PBHelper.convert(locsProto);
    BlockWithLocations[] blocks = locs.getBlocks();
    BlockWithLocations[] blocks2 = locs2.getBlocks();
    assertEquals(blocks.length, blocks2.length);
    for (int j = 0; j < blocks.length; j++) {
      compare(blocks[j], blocks2[j]);
    }
  }
}
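The test relies on two helpers, getBlockWithLocations and compare, defined elsewhere in the test class. A minimal sketch of what they might look like follows, assuming the BlockWithLocations(Block, String[], String[], StorageType[]) constructor; the field values are made up, and the striped-block variant that the boolean flag selects in the real test is omitted.

// Hypothetical helpers; the actual test class defines its own versions.
private static BlockWithLocations getBlockWithLocations(int bid, boolean isStriped) {
  final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
  final String[] storageIDs = {"s1", "s2", "s3"};
  final StorageType[] storageTypes =
      {StorageType.DISK, StorageType.DISK, StorageType.DISK};
  // isStriped would wrap the result in a striped-block subclass; plain
  // blocks are enough for this sketch, so the flag is ignored here.
  return new BlockWithLocations(new Block(bid, 0, 1),
      datanodeUuids, storageIDs, storageTypes);
}

private static void compare(BlockWithLocations expected, BlockWithLocations actual) {
  assertEquals(expected.getBlock(), actual.getBlock());
  assertArrayEquals(expected.getDatanodeUuids(), actual.getDatanodeUuids());
  assertArrayEquals(expected.getStorageIDs(), actual.getStorageIDs());
}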
Example 2: getBlocks
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
@Override // NamenodeProtocol
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
    throws IOException {
  if (size <= 0) {
    throw new IllegalArgumentException(
        "Unexpected not positive size: " + size);
  }
  checkNNStartup();
  namesystem.checkSuperuserPrivilege();
  return namesystem.getBlockManager().getBlocks(datanode, size);
}
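For illustration, a hedged sketch of how the size guard behaves: a non-positive size fails fast with IllegalArgumentException before the startup and privilege checks run. The rpcServer and dn arguments are hypothetical test-harness objects, not part of the example above.

// Sketch only: verifies the guard fires before any namesystem access.
static void expectRejectsNonPositiveSize(NameNodeRpcServer rpcServer,
    DatanodeInfo dn) throws IOException {
  try {
    rpcServer.getBlocks(dn, 0L); // size <= 0 must be rejected
    throw new AssertionError("expected IllegalArgumentException");
  } catch (IllegalArgumentException expected) {
    // thrown before checkNNStartup() and checkSuperuserPrivilege()
  }
}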
Example 3: getBlockList
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
/**
 * Fetch new blocks of this source from the namenode and update this source's
 * block list and {@link Dispatcher#globalBlocks}.
 *
 * @return the total size of the received blocks, in bytes.
 */
private long getBlockList() throws IOException {
  final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
  final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);
  long bytesReceived = 0;
  for (BlockWithLocations blk : newBlocks.getBlocks()) {
    bytesReceived += blk.getBlock().getNumBytes();
    synchronized (globalBlocks) {
      final DBlock block = globalBlocks.get(blk.getBlock());
      synchronized (block) {
        block.clearLocations();
        // update locations
        final String[] datanodeUuids = blk.getDatanodeUuids();
        final StorageType[] storageTypes = blk.getStorageTypes();
        for (int i = 0; i < datanodeUuids.length; i++) {
          final StorageGroup g = storageGroupMap.get(
              datanodeUuids[i], storageTypes[i]);
          if (g != null) { // not unknown
            block.addLocation(g);
          }
        }
      }
      if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
        // filter bad candidates
        srcBlocks.add(block);
      }
    }
  }
  return bytesReceived;
}
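Note the locking discipline in this example: the getBlocks RPC itself runs outside any monitor, and each block's location list is then rebuilt under the block's own lock nested inside the globalBlocks lock, so the candidate check against srcBlocks always sees a fully updated block.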
Example 4: getBlocks
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
/**
 * Return a list of blocks and their locations on <code>datanode</code>
 * whose total size is <code>size</code>.
 *
 * @param datanode the datanode on which the blocks are located
 * @param size the total size of the blocks to return
 */
public BlocksWithLocations getBlocks(DatanodeID datanode, long size
    ) throws IOException {
  namesystem.checkOperation(OperationCategory.READ);
  namesystem.readLock();
  try {
    // re-check after taking the read lock, in case the namenode's
    // active/standby state changed while waiting for the lock
    namesystem.checkOperation(OperationCategory.READ);
    return getBlocksWithLocations(datanode, size);
  } finally {
    namesystem.readUnlock();
  }
}
Example 5: convert
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
  BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto
      .newBuilder();
  for (BlockWithLocations b : blks.getBlocks()) {
    builder.addBlocks(convert(b));
  }
  return builder.build();
}
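The method above converts POJO to protobuf. For completeness, here is a sketch of the reverse direction; PBHelper provides such an overload, but the body below is an approximation rather than verbatim source (a java.util.List import is assumed).

public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
  List<BlockWithLocationsProto> b = blocks.getBlocksList();
  BlockWithLocations[] ret = new BlockWithLocations[b.size()];
  for (int i = 0; i < b.size(); i++) {
    ret[i] = convert(b.get(i)); // per-block proto -> POJO conversion
  }
  return new BlocksWithLocations(ret);
}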
Example 6: getBlocks
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
@Override
public GetBlocksResponseProto getBlocks(RpcController unused,
    GetBlocksRequestProto request) throws ServiceException {
  DatanodeInfo dnInfo = new DatanodeInfo(PBHelper.convert(request
      .getDatanode()));
  BlocksWithLocations blocks;
  try {
    blocks = impl.getBlocks(dnInfo, request.getSize());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return GetBlocksResponseProto.newBuilder()
      .setBlocks(PBHelper.convert(blocks)).build();
}
Example 7: getBlocks
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
@Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
    throws IOException {
  GetBlocksRequestProto req = GetBlocksRequestProto.newBuilder()
      .setDatanode(PBHelper.convert((DatanodeID)datanode)).setSize(size)
      .build();
  try {
    return PBHelper.convert(rpcProxy.getBlocks(NULL_CONTROLLER, req)
        .getBlocks());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
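As a usage sketch, the translator above is normally driven through the NamenodeProtocol interface. The helper below is hypothetical (not part of Hadoop) and simply sums the byte counts reported for one datanode.

static long totalBlockBytes(NamenodeProtocol namenode, DatanodeInfo dn,
    long size) throws IOException {
  long total = 0;
  for (BlockWithLocations b : namenode.getBlocks(dn, size).getBlocks()) {
    total += b.getBlock().getNumBytes(); // bytes in each returned block
  }
  return total;
}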
Example 8: testConvertBlocksWithLocations
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations[] list = new BlockWithLocations[] {
      getBlockWithLocations(1), getBlockWithLocations(2) };
  BlocksWithLocations locs = new BlocksWithLocations(list);
  BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
  BlocksWithLocations locs2 = PBHelper.convert(locsProto);
  BlockWithLocations[] blocks = locs.getBlocks();
  BlockWithLocations[] blocks2 = locs2.getBlocks();
  assertEquals(blocks.length, blocks2.length);
  for (int i = 0; i < blocks.length; i++) {
    compare(blocks[i], blocks2[i]);
  }
}
Example 9: getBlocks
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
@Override
public GetBlocksResponseProto getBlocks(RpcController unused,
    GetBlocksRequestProto request) throws ServiceException {
  DatanodeInfo dnInfo = new DatanodeInfo(PBHelperClient.convert(request
      .getDatanode()));
  BlocksWithLocations blocks;
  try {
    blocks = impl.getBlocks(dnInfo, request.getSize());
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return GetBlocksResponseProto.newBuilder()
      .setBlocks(PBHelper.convert(blocks)).build();
}
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 15; Source file: NamenodeProtocolServerSideTranslatorPB.java
Example 10: getBlocks
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
@Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
    throws IOException {
  GetBlocksRequestProto req = GetBlocksRequestProto.newBuilder()
      .setDatanode(PBHelperClient.convert((DatanodeID)datanode)).setSize(size)
      .build();
  try {
    return PBHelper.convert(rpcProxy.getBlocks(NULL_CONTROLLER, req)
        .getBlocks());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example 11: getBlocks
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
@Override
public BlocksWithLocations getBlocks(final DatanodeInfo datanode,
    final long size) throws IOException {
  return (failoverHandler.new ImmutableFSCaller<BlocksWithLocations>() {
    @Override
    public BlocksWithLocations call() throws IOException {
      return namenode.getBlocks(datanode, size);
    }
  }).callFS();
}
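This variant comes from a failover-aware client wrapper: the call is packaged as an ImmutableFSCaller, presumably so that callFS() can retry the read-only request against the active namenode if a failover interrupts it.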
Example 12: getBlocks
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
/**
 * Return a list of blocks and their locations on <code>datanode</code>
 * whose total size is <code>size</code>.
 *
 * @param datanode the datanode on which the blocks are located
 * @param size the total size of the blocks to return
 */
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
    throws IOException {
  if (size <= 0) {
    throw new IllegalArgumentException(
        "Unexpected not positive size: " + size);
  }
  return namesystem.getBlocks(datanode, size);
}
Example 13: getBlocks
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; // import the required package/class
@Override // NamenodeProtocol
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
    throws IOException {
  if (size <= 0) {
    throw new IllegalArgumentException(
        "Unexpected not positive size: " + size);
  }
  namesystem.checkSuperuserPrivilege();
  return namesystem.getBlockManager().getBlocks(datanode, size);
}