This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo. If you have been wondering what the BlockInfo class is for and how to use it, the examples curated here should help.
BlockInfo is a nested class of BlocksMap, which lives in the org.apache.hadoop.hdfs.server.namenode package. Fifteen code examples of the class are shown below, ordered by popularity by default.
Example 1: generateINode
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
private INode generateINode(long inodeId) {
  // Anonymous INode subclass whose overrides are all no-ops.
  return new INode(inodeId, new PermissionStatus("", "", new FsPermission((short) 0)), 0, 0) {
    @Override
    long[] computeContentSummary(long[] summary) {
      return null;
    }
    @Override
    DirCounts spaceConsumedInTree(DirCounts counts) {
      return null;
    }
    @Override
    public boolean isDirectory() {
      return false;
    }
    @Override
    int collectSubtreeBlocksAndClear(List<BlockInfo> v,
        int blocksLimit,
        List<INode> removedINodes) {
      return 0;
    }
  };
}
Example 2: filterMapWithInode
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
private void filterMapWithInode(INode node) {
  // Must NOT filter with files in WaitingRoom already!
  if (node.getFullPathName().startsWith(wrDir)) return;
  LOG.info("Filtering WaitingRoomMaps with inode " + node.getFullPathName());
  if (node.isDirectory()) {
    INodeDirectory dir = (INodeDirectory) node;
    for (INode child: dir.getChildren()) {
      filterMapWithInode(child);
    }
  } else {
    BlockInfo[] blocks = ((INodeFile)node).getBlocks();
    // Mark all blocks of this file as referenced
    for (BlockInfo block: blocks) {
      blockRefMap.remove(block.getBlockId());
    }
  }
}
Example 3: getAllLocatedBlocks
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
private void getAllLocatedBlocks(INode inode,
    List<LocatedBlocksWithMetaInfo> blocks)
    throws IOException {
  if (inode.isDirectory()) {
    INodeDirectory dir = (INodeDirectory) inode;
    for (INode child: dir.getChildren()) {
      getAllLocatedBlocks(child, blocks);
    }
  } else {
    INodeFile file = (INodeFile) inode;
    BlockInfo[] fileBlocks = file.getBlocks();
    List<LocatedBlock> lb = new ArrayList<LocatedBlock>();
    for (BlockInfo block: fileBlocks) {
      // DatanodeInfo is unavailable, so set as empty for now
      lb.add(new LocatedBlock(block, new DatanodeInfo[0]));
    }
    LocatedBlocks locatedBlocks = new LocatedBlocks(
        file.computeContentSummary().getLength(), // flength
        lb, // blks
        false); // isUnderConstruction
    // Update DatanodeInfo from NN
    blocks.add(namenode.updateDatanodeInfo(locatedBlocks));
  }
}
Example 4: getParityBlocks
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
public BlockInfo[] getParityBlocks(BlockInfo[] blocks) {
  // Every full stripe contributes numParityBlocks; a trailing partial stripe
  // still carries a full set of parity blocks.
  int numBlocks = (blocks.length / numStripeBlocks) * numParityBlocks
      + ((blocks.length % numStripeBlocks == 0) ? 0 : numParityBlocks);
  BlockInfo[] parityBlocks = new BlockInfo[numBlocks];
  int pos = 0;
  int parityEnd = numParityBlocks;
  // Within each stripe the parity blocks come first, so copy numParityBlocks
  // entries and then skip over the data blocks to the next stripe.
  for (int i = 0; i < numBlocks; i++) {
    parityBlocks[i] = blocks[pos];
    pos++;
    if (pos == parityEnd) {
      pos += numDataBlocks;
      parityEnd += numStripeBlocks;
    }
  }
  return parityBlocks;
}
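As a quick check of the arithmetic above, here is a small self-contained sketch (the stripe geometry and block count are made-up numbers, not taken from the example) that evaluates the same formula getParityBlocks uses to size its result array:

public class ParityCountSketch {
  public static void main(String[] args) {
    // Hypothetical codec geometry: 4 parity blocks and 10 data blocks per stripe.
    int numParityBlocks = 4;
    int numDataBlocks = 10;
    int numStripeBlocks = numParityBlocks + numDataBlocks; // 14
    // A file whose block array holds two full stripes (28 blocks) plus a partial stripe of 3.
    int totalBlocks = 31;
    // Same formula as in getParityBlocks.
    int numParity = (totalBlocks / numStripeBlocks) * numParityBlocks
        + ((totalBlocks % numStripeBlocks == 0) ? 0 : numParityBlocks);
    System.out.println(numParity); // prints 12: 2 * 4 for the full stripes + 4 for the partial one
  }
}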
Example 5: checkRaidProgress
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
/**
 * Count the number of live replicas of each parity block in the raided file.
 * If any stripe does not have enough parity block replicas, add the stripe to
 * raidEncodingTasks to schedule encoding.
 * If forceAdd is true, we always add the stripe to raidEncodingTasks
 * without checking.
 * @param sourceINode the inode of the raided source file
 * @param raidEncodingTasks the set that stripes needing encoding are added to
 * @param fs the namesystem used to count live replicas
 * @param forceAdd if true, add every stripe without checking its parity replicas
 * @return true if all parity blocks of the file have enough replicas
 * @throws IOException
 */
public boolean checkRaidProgress(INodeFile sourceINode,
    LightWeightLinkedSet<RaidBlockInfo> raidEncodingTasks, FSNamesystem fs,
    boolean forceAdd) throws IOException {
  boolean result = true;
  BlockInfo[] blocks = sourceINode.getBlocks();
  for (int i = 0; i < blocks.length;
       i += numStripeBlocks) {
    boolean hasParity = true;
    if (!forceAdd) {
      // The parity blocks sit at the start of each stripe.
      for (int j = 0; j < numParityBlocks; j++) {
        if (fs.countLiveNodes(blocks[i + j]) < this.parityReplication) {
          hasParity = false;
          break;
        }
      }
    }
    if (!hasParity || forceAdd) {
      raidEncodingTasks.add(new RaidBlockInfo(blocks[i], parityReplication, i));
      result = false;
    }
  }
  return result;
}
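The loop above visits the first block of every stripe and inspects only the parity blocks at the head of that stripe. The following stand-alone sketch mirrors that stripe iteration, using a plain int array of hypothetical live-replica counts in place of the fs.countLiveNodes() calls:

public class RaidProgressSketch {
  public static void main(String[] args) {
    // Hypothetical geometry: 3 parity blocks followed by 4 data blocks per stripe.
    int numStripeBlocks = 7;
    int numParityBlocks = 3;
    int parityReplication = 2;
    // Made-up live replica counts for two stripes; the second stripe has an
    // under-replicated parity block.
    int[] liveReplicas = {2, 2, 2, 3, 3, 3, 3,
                          2, 1, 2, 3, 3, 3, 3};
    for (int i = 0; i < liveReplicas.length; i += numStripeBlocks) {
      boolean hasParity = true;
      for (int j = 0; j < numParityBlocks; j++) {
        if (liveReplicas[i + j] < parityReplication) {
          hasParity = false;
          break;
        }
      }
      if (!hasParity) {
        // In checkRaidProgress this is where a RaidBlockInfo task would be queued.
        System.out.println("stripe starting at block " + i + " needs encoding");
      }
    }
  }
}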
Example 6: appendBlocks
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
@Override
public void appendBlocks(INodeFile [] inodes, int totalAddedBlocks, INodeFile inode) {
  int size = this.blocks.length;
  BlockInfo[] newlist = new BlockInfo[size + totalAddedBlocks];
  System.arraycopy(this.blocks, 0, newlist, 0, size);
  for(INodeFile in: inodes) {
    BlockInfo[] blks = in.storage.getBlocks();
    System.arraycopy(blks, 0, newlist, size, blks.length);
    size += blks.length;
  }
  this.blocks = newlist;
  for(BlockInfo bi: this.blocks) {
    bi.setINode(inode);
  }
}
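appendBlocks grows the block array by allocating the new array once and copying the old contents plus each file's blocks after it with System.arraycopy. A generic, self-contained sketch of that append pattern, using plain int arrays purely for illustration:

import java.util.Arrays;

public class ArrayAppendSketch {
  public static void main(String[] args) {
    int[] base = {1, 2, 3};
    int[][] extras = {{4, 5}, {6}};
    // Count how many extra elements will be appended.
    int totalAdded = 0;
    for (int[] e : extras) totalAdded += e.length;
    // Allocate once, copy the existing array, then copy each extra array after it.
    int[] newlist = new int[base.length + totalAdded];
    System.arraycopy(base, 0, newlist, 0, base.length);
    int size = base.length;
    for (int[] e : extras) {
      System.arraycopy(e, 0, newlist, size, e.length);
      size += e.length;
    }
    System.out.println(Arrays.toString(newlist)); // [1, 2, 3, 4, 5, 6]
  }
}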
Example 7: listMoveToHead
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
/**
 * Remove block from the list and insert
 * into the head of the list of blocks
 * related to the specified DatanodeDescriptor.
 * If the head is null then form a new list.
 * @return current block as the new head of the list.
 */
protected BlockInfo listMoveToHead(BlockInfo block, BlockInfo head,
    DatanodeIndex indexes) {
  assert head != null : "Head can not be null";
  if (head == block) {
    return head;
  }
  BlockInfo next = block.getSetNext(indexes.currentIndex, head);
  BlockInfo prev = block.getSetPrevious(indexes.currentIndex, null);
  head.setPrevious(indexes.headIndex, block);
  indexes.headIndex = indexes.currentIndex;
  prev.setNext(prev.findDatanode(this), next);
  if (next != null)
    next.setPrevious(next.findDatanode(this), prev);
  return block;
}
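listMoveToHead manipulates the per-datanode doubly linked list that is threaded through the BlockInfo objects themselves: it unlinks the block from its current position and relinks it in front of the old head. The same move-to-head operation on a simplified, non-intrusive node class (illustrative only, not the NameNode's actual data structure):

/** Simplified sketch of a doubly linked list move-to-head operation. */
class Node {
  int id;
  Node prev, next;
  Node(int id) { this.id = id; }
}

public class MoveToHeadSketch {
  /** Unlink 'node' from its current position and return it as the new head. */
  static Node moveToHead(Node node, Node head) {
    if (node == head) {
      return head; // already at the head, nothing to do
    }
    // Unlink node from its current position.
    if (node.prev != null) node.prev.next = node.next;
    if (node.next != null) node.next.prev = node.prev;
    // Relink it in front of the old head.
    node.prev = null;
    node.next = head;
    head.prev = node;
    return node;
  }

  public static void main(String[] args) {
    Node a = new Node(1), b = new Node(2), c = new Node(3);
    a.next = b; b.prev = a; b.next = c; c.prev = b;   // list: 1 <-> 2 <-> 3
    Node head = moveToHead(c, a);                     // list: 3 <-> 1 <-> 2
    for (Node n = head; n != null; n = n.next) System.out.print(n.id + " ");
  }
}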
Example 8: isSourceBlock
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
@Override
public boolean isSourceBlock(BlockInfo block) {
  int index = 0;
  if (block instanceof RaidBlockInfo) {
    RaidBlockInfo rbi = (RaidBlockInfo)block;
    index = rbi.index;
  } else {
    if (LOG.isDebugEnabled()) {
      LOG.debug("block: " + block + " is not raid block info");
    }
    for (index = 0; index < blocks.length; index++) {
      if (blocks[index].equals(block)) {
        break;
      }
    }
    if (index == blocks.length) {
      return false;
    }
  }
  return index % codec.numStripeBlocks >= codec.numParityBlocks;
}
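The final modulo test encodes the stripe layout: within each stripe of codec.numStripeBlocks blocks, the first codec.numParityBlocks entries are parity and everything after them is a source (data) block. A tiny sketch with hypothetical stripe numbers makes the index classification explicit:

public class StripeIndexSketch {
  public static void main(String[] args) {
    // Hypothetical geometry: each stripe stores 3 parity blocks followed by 4 data blocks.
    int numStripeBlocks = 7;
    int numParityBlocks = 3;
    for (int index = 0; index < 2 * numStripeBlocks; index++) {
      boolean isSource = index % numStripeBlocks >= numParityBlocks;
      // Prints: indices 0-2 and 7-9 are parity, 3-6 and 10-13 are source.
      System.out.println(index + " -> " + (isSource ? "source" : "parity"));
    }
  }
}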
Example 9: updateNeededReplicationQueue
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
/**
 * Update a block's priority queue in the neededReplications queues
 *
 * @param blockInfo the block being updated
 * @param delta the change in the number of replicas
 * @param numCurrentReplicas current number of replicas
 * @param numCurrentDecommissionedReplicas current number of decommissioned replicas
 * @param node the node where the replica resides
 * @param fileReplication expected number of replicas
 */
private void updateNeededReplicationQueue(BlockInfo blockInfo, int delta,
    int numCurrentReplicas, int numCurrentDecommissionedReplicas,
    DatanodeDescriptor node, short fileReplication) {
  // Reconstruct the replica counts as they were before this change.
  int numOldReplicas = numCurrentReplicas;
  int numOldDecommissionedReplicas = numCurrentDecommissionedReplicas;
  if (node.isDecommissioned() || node.isDecommissionInProgress()) {
    numOldDecommissionedReplicas -= delta;
  } else {
    numOldReplicas -= delta;
  }
  // Remove the block under its old counts, then re-add it under the new ones.
  if (fileReplication > numOldReplicas) {
    neededReplications.remove(blockInfo, numOldReplicas,
        numOldDecommissionedReplicas, fileReplication);
  }
  if (fileReplication > numCurrentReplicas) {
    neededReplications.add(blockInfo, numCurrentReplicas,
        numCurrentDecommissionedReplicas, fileReplication);
  }
}
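The remove-then-add pair at the end is the usual way to reposition an entry in priority queues whose ordering key changes: take the block out under its old replica counts, then re-insert it under the new ones. A generic sketch of that requeue pattern (the bucket structure and priority function here are made up for illustration, not the NameNode's implementation):

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class PriorityRequeueSketch {
  // Buckets of block ids keyed by priority level (lower level = more urgent).
  private final Map<Integer, Set<Long>> queues = new TreeMap<Integer, Set<Long>>();

  // Made-up priority function: the fewer live replicas, the more urgent the block.
  private static int priority(int liveReplicas, int expectedReplicas) {
    if (liveReplicas == 0) return 0;
    return liveReplicas < expectedReplicas ? 1 : 2;
  }

  void updatePriority(long blockId, int oldReplicas, int newReplicas, int expected) {
    // Remove under the old key, then re-insert under the new one,
    // mirroring the remove()/add() pair above.
    Set<Long> oldBucket = queues.get(priority(oldReplicas, expected));
    if (oldBucket != null) {
      oldBucket.remove(blockId);
    }
    Set<Long> newBucket = queues.get(priority(newReplicas, expected));
    if (newBucket == null) {
      newBucket = new HashSet<Long>();
      queues.put(priority(newReplicas, expected), newBucket);
    }
    newBucket.add(blockId);
  }

  public static void main(String[] args) {
    PriorityRequeueSketch s = new PriorityRequeueSketch();
    s.updatePriority(42L, 3, 3, 3); // fully replicated -> lowest-urgency bucket
    s.updatePriority(42L, 3, 1, 3); // lost replicas -> moves to a more urgent bucket
    System.out.println(s.queues);   // {1=[42], 2=[]}
  }
}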
Example 10: set
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
public void set(long inodeId,
    String path,
    short replication,
    long mtime,
    long atime,
    long blockSize,
    BlockInfo[] blocks,
    PermissionStatus permissions,
    String clientName,
    String clientMachine) {
  this.inodeId = inodeId;
  this.path = path;
  this.replication = replication;
  this.mtime = mtime;
  this.atime = atime;
  this.blockSize = blockSize;
  this.blocks = blocks;
  this.permissions = permissions;
  this.clientName = clientName;
  this.clientMachine = clientMachine;
}
Example 11: insertIntoList
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
/**
 * Adds blocks already connected into list, to this descriptor's blocks.
 * The blocks in the input list already have this descriptor inserted to them.
 * Used for parallel initial block reports.
 */
void insertIntoList(BlockInfo head, int headIndex, BlockInfo tail, int tailIndex, int count) {
  if (head == null)
    return;
  // connect tail to now-head
  tail.setNext(tailIndex, blockList);
  if (blockList != null)
    blockList.setPrevious(blockList.findDatanode(this), tail);
  // create new head
  blockList = head;
  blockList.setPrevious(headIndex, null);
  // add new blocks to the count
  numOfBlocks += count;
}
Example 12: getBlockInfoInternal
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
private LocatedBlockWithFileName getBlockInfoInternal(long blockId)
    throws IOException {
  Block block = new Block(blockId);
  BlockInfo blockInfo = namesystem.blocksMap.getBlockInfo(block);
  if (null == blockInfo) {
    return null;
  }
  INodeFile inode = blockInfo.getINode();
  if (null == inode) {
    return null;
  }
  String fileName = inode.getFullPathName();
  // get the location info
  List<DatanodeInfo> diList = new ArrayList<DatanodeInfo>();
  for (Iterator<DatanodeDescriptor> it
      = namesystem.blocksMap.nodeIterator(block); it.hasNext();) {
    diList.add(it.next());
  }
  return new LocatedBlockWithFileName(block,
      diList.toArray(new DatanodeInfo[] {}), fileName);
}
Example 13: ReplicationWork
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
public ReplicationWork(BlockInfo block,
    INodeFile fileINode,
    int numOfReplicas,
    DatanodeDescriptor srcNode,
    List<DatanodeDescriptor> containingNodes,
    int priority){
  this.block = block;
  this.blockSize = block.getNumBytes();
  this.fileINode = fileINode;
  this.numOfReplicas = numOfReplicas;
  this.srcNode = srcNode;
  this.containingNodes = containingNodes;
  this.priority = priority;
  this.targets = null;
}
Example 14: getBlockIndex
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
int getBlockIndex(Block blk, String file) throws IOException {
  BlockInfo[] blocks = getBlocks();
  if (blocks == null) {
    throw new IOException("blocks is null for file " + file);
  }
  // null indicates that this block is currently being added.
  // Return blocks.length as the index in this case.
  if (blk == null) {
    return blocks.length;
  }
  for (int curBlk = 0; curBlk < blocks.length; curBlk++) {
    if (blocks[curBlk].equals(blk)) {
      return curBlk;
    }
  }
  throw new IOException("Cannot locate " + blk + " in file " + file);
}
Example 15: metaSave
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the required package/class
/**
 * Iterate through all items and print them.
 */
void metaSave(PrintWriter out) {
  synchronized (pendingReplications) {
    out.println("Metasave: Blocks being replicated: " +
        pendingReplications.size());
    Iterator<Map.Entry<BlockInfo, PendingBlockInfo>> iter =
        pendingReplications.entrySet().iterator();
    while (iter.hasNext()) {
      Map.Entry<BlockInfo, PendingBlockInfo> entry = iter.next();
      PendingBlockInfo pendingBlock = entry.getValue();
      BlockInfo block = entry.getKey();
      out.println(block +
          " StartTime: " + new Time(pendingBlock.timeStamp) +
          " NumReplicaInProgress: " +
          pendingBlock.numReplicasInProgress);
    }
  }
}