This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo.getINode. If you are unsure what BlockInfo.getINode does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore the enclosing class, org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo, for further details.
The following shows 10 code examples of BlockInfo.getINode, ordered by popularity.
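All ten examples share the same defensive pattern: getINode() returns the INodeFile that currently owns the block, or null once the block no longer belongs to any file (for example, after the file is deleted), so callers must null-check the result. Here is a minimal sketch of that pattern before the examples; it is illustrative only, and the class and method names introduced here (GetINodeSketch, ownerPath) are assumptions, not Hadoop APIs. It is placed in the namenode package because INodeFile is package-private in the Hadoop versions these examples come from.

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;

// Minimal usage sketch for BlockInfo.getINode(); names here are
// hypothetical and only illustrate the null-check contract.
class GetINodeSketch {
  /** Returns the full path of the file owning the block, or null if orphaned. */
  static String ownerPath(BlockInfo blockInfo) {
    // getINode() returns null when the block no longer maps to any file,
    // so every caller in the examples below guards against that case.
    INodeFile inode = blockInfo.getINode();
    return (inode == null) ? null : inode.getFullPathName();
  }
}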
Example 1: getBlockInfoInternal
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the package/class this method depends on
private LocatedBlockWithFileName getBlockInfoInternal(long blockId)
    throws IOException {
  Block block = new Block(blockId);
  BlockInfo blockInfo = namesystem.blocksMap.getBlockInfo(block);
  if (null == blockInfo) {
    return null;
  }
  INodeFile inode = blockInfo.getINode();
  if (null == inode) {
    return null;
  }
  String fileName = inode.getFullPathName();
  // get the location info
  List<DatanodeInfo> diList = new ArrayList<DatanodeInfo>();
  for (Iterator<DatanodeDescriptor> it
      = namesystem.blocksMap.nodeIterator(block); it.hasNext();) {
    diList.add(it.next());
  }
  return new LocatedBlockWithFileName(block,
      diList.toArray(new DatanodeInfo[] {}), fileName);
}
Example 2: add
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the package/class this method depends on
synchronized boolean add(
    BlockInfo blockInfo,
    int curReplicas,
    int decomissionedReplicas,
    int expectedReplicas) {
  if (curReplicas < 0 || expectedReplicas <= curReplicas) {
    return false;
  }
  int priLevel = getPriority(blockInfo, curReplicas, decomissionedReplicas,
      expectedReplicas);
  INodeFile fileINode = blockInfo.getINode();
  if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS && fileINode != null &&
      fileINode.getStorageType().equals(StorageType.RAID_STORAGE)) {
    RaidCodec codec = ((INodeRaidStorage) fileINode.getStorage()).getCodec();
    return raidQueue.add(blockInfo, codec);
  }
  if (priLevel != LEVEL && priorityQueues.get(priLevel).add(blockInfo)) {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug(
          "BLOCK* NameSystem.UnderReplicationBlock.add:"
          + blockInfo
          + " has only " + curReplicas
          + " replicas and need " + expectedReplicas
          + " replicas so is added to neededReplications"
          + " at priority level " + priLevel);
    }
    return true;
  }
  return false;
}
Example 3: remove
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the package/class this method depends on
synchronized boolean remove(BlockInfo blockInfo, int priLevel) {
  INodeFile fileINode = blockInfo.getINode();
  if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS && fileINode != null &&
      fileINode.getStorageType().equals(StorageType.RAID_STORAGE)) {
    RaidCodec codec = ((INodeRaidStorage) fileINode.getStorage()).getCodec();
    return raidQueue.remove(blockInfo, codec);
  }
  if (priLevel >= 0 && priLevel < LEVEL
      && priorityQueues.get(priLevel).remove(blockInfo)) {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug(
          "BLOCK* NameSystem.UnderReplicationBlock.remove: "
          + "Removing block " + blockInfo
          + " from priority queue " + priLevel);
    }
    return true;
  } else {
    for (int i = 0; i < LEVEL; i++) {
      if (i != priLevel && priorityQueues.get(i).remove(blockInfo)) {
        if (NameNode.stateChangeLog.isDebugEnabled()) {
          NameNode.stateChangeLog.debug(
              "BLOCK* NameSystem.UnderReplicationBlock.remove: "
              + "Removing block " + blockInfo
              + " from priority queue " + i);
        }
        return true;
      }
    }
  }
  return false;
}
Example 4: add
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the package/class this method depends on
/**
 * Add a missing block to the queue.
 */
boolean add(BlockInfo blockInfo) {
  INodeFile fileINode = blockInfo.getINode();
  try {
    int blockIndex = getBlockIndex(blockInfo);
    RaidBlockInfo firstBlock = fileINode.getFirstBlockInStripe(blockInfo, blockIndex);
    HashSet<Integer> missingBlkIdxs = null;
    int i = 0;
    for (; i < maxLevel; i++) {
      HashMap<RaidBlockInfo, HashSet<Integer>> queue = priorityQueues.get(i);
      if (queue.containsKey(firstBlock)) {
        missingBlkIdxs = queue.get(firstBlock);
        if (missingBlkIdxs.contains(blockIndex)) {
          return false;
        }
        if (i == maxLevel - 1) {
          missingBlkIdxs.add(blockIndex);
          return true;
        }
        queue.remove(firstBlock);
        break;
      }
    }
    if (missingBlkIdxs == null) {
      // no other missing blocks in this stripe
      missingBlkIdxs = new HashSet<Integer>(1);
      missingBlkIdxs.add(blockIndex);
      priorityQueues.get(0).put(firstBlock, missingBlkIdxs);
    } else {
      // there are other missing blocks in this stripe
      missingBlkIdxs.add(blockIndex);
      priorityQueues.get(i + 1).put(firstBlock, missingBlkIdxs);
    }
    return true;
  } catch (IOException ex) {
    LOG.warn("Failed to add block into Raid missing blocks queue: " +
        "block: " + blockInfo +
        ", file: " + fileINode +
        ", codec: " + codec.id,
        ex);
    return false;
  }
}
Example 5: remove
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the package/class this method depends on
/**
 * Remove a missing block from the queue.
 */
boolean remove(BlockInfo blockInfo) {
  INodeFile fileINode = blockInfo.getINode();
  try {
    int blockIndex = getBlockIndex(blockInfo);
    RaidBlockInfo firstBlock = fileINode.getFirstBlockInStripe(blockInfo, blockIndex);
    HashSet<Integer> missingBlkIdxs = null;
    int i = 0;
    for (; i < maxLevel; i++) {
      HashMap<RaidBlockInfo, HashSet<Integer>> queue = priorityQueues.get(i);
      if (queue.containsKey(firstBlock)) {
        missingBlkIdxs = queue.get(firstBlock);
        if (!missingBlkIdxs.contains(blockIndex)) {
          return false;
        }
        break;
      }
    }
    if (missingBlkIdxs == null) {
      // cannot find any missing blocks in this stripe
      return false;
    } else {
      missingBlkIdxs.remove(blockIndex);
      if (missingBlkIdxs.size() == 0) {
        priorityQueues.get(i).remove(firstBlock);
      } else if (missingBlkIdxs.size() < (i + 1)) {
        priorityQueues.get(i).remove(firstBlock);
        priorityQueues.get(i - 1).put(firstBlock, missingBlkIdxs);
      }
    }
    return true;
  } catch (Exception ex) {
    LOG.warn("Failed to remove block from Raid missing blocks queue: " +
        "block: " + blockInfo +
        ", file: " + fileINode +
        ", codec: " + codec.id,
        ex);
    return false;
  }
}
Example 6: markBlockAsCorrupt
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the package/class this method depends on
/**
 * Mark the block belonging to the given datanode as corrupt.
 *
 * @param blk Block to be marked as corrupt
 * @param dn Datanode which holds the corrupt replica
 * @param parallelInitialBlockReport indicates that this call
 *        is a result of a parallel initial block report
 */
public void markBlockAsCorrupt(Block blk, DatanodeInfo dn,
    final boolean parallelInitialBlockReport) throws IOException {
  if (!parallelInitialBlockReport) {
    // regular call, not through parallel block report
    writeLock();
  }
  lockParallelBRLock(parallelInitialBlockReport);
  try {
    DatanodeDescriptor node = getDatanode(dn);
    if (node == null) {
      throw new IOException("Cannot mark block " + blk.getBlockName() +
          " as corrupt because datanode " + dn.getName() +
          " does not exist.");
    }
    final BlockInfo storedBlockInfo = blocksMap.getStoredBlock(blk);
    if (storedBlockInfo == null) {
      // Check if the replica is in the blocksMap; if not,
      // ignore the request for now. This could happen when the
      // BlockScanner thread of a Datanode reports a bad block before
      // block reports are sent by the Datanode on startup.
      NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: " +
          "block " + blk + " could not be marked " +
          "as corrupt as it does not exist in " +
          "blocksMap");
    } else {
      INodeFile inode = storedBlockInfo.getINode();
      if (inode == null) {
        NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: " +
            "block " + blk + " could not be marked " +
            "as corrupt as it does not belong to " +
            "any file");
        addToInvalidates(storedBlockInfo, node, false);
        return;
      }
      // Add this replica to the corruptReplicas map
      if (!corruptReplicas.addToCorruptReplicasMap(storedBlockInfo, node)) {
        return;
      }
      NumberReplicas num = countNodes(storedBlockInfo);
      short blockReplication = inode.getBlockReplication(storedBlockInfo);
      if (num.liveReplicas() > blockReplication) {
        // the block is over-replicated, so invalidate the replica immediately
        invalidateBlock(storedBlockInfo, node, true);
      } else if (isPopulatingReplQueuesInternal()) {
        // add the block to neededReplication
        int numCurrentReplicas = num.liveReplicas() +
            pendingReplications.getNumReplicas(storedBlockInfo);
        updateNeededReplicationQueue(storedBlockInfo, -1, numCurrentReplicas,
            num.decommissionedReplicas, node, blockReplication);
      }
    }
  } finally {
    if (!parallelInitialBlockReport) {
      writeUnlock();
    }
    unlockParallelBRLock(parallelInitialBlockReport);
  }
}
Example 7: processBlocksBeingWrittenReport
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the package/class this method depends on
public boolean processBlocksBeingWrittenReport(DatanodeID nodeID,
    BlockListAsLongs blocksBeingWritten)
    throws IOException {
  // check if we can discard the report
  if (safeMode != null && !safeMode.shouldProcessRBWReports()) {
    return false;
  }
  writeLock();
  try {
    DatanodeDescriptor dataNode = getDatanode(nodeID);
    if (dataNode == null) {
      throw new IOException("ProcessReport from unregistered node: "
          + nodeID.getName());
    }
    Block block = new Block();
    for (int i = 0; i < blocksBeingWritten.getNumberOfBlocks(); i++) {
      block.set(blocksBeingWritten.getBlockId(i),
          blocksBeingWritten.getBlockLen(i),
          blocksBeingWritten.getBlockGenStamp(i));
      BlockInfo storedBlock = blocksMap.getStoredBlockWithoutMatchingGS(block);
      if (storedBlock == null) {
        rejectAddStoredBlock(
            new Block(block), dataNode,
            "Block not in blockMap with any generation stamp",
            true, false);
        continue;
      }
      INodeFile inode = storedBlock.getINode();
      if (inode == null) {
        rejectAddStoredBlock(
            new Block(block), dataNode,
            "Block does not correspond to any file",
            true, false);
        continue;
      }
      boolean underConstruction = inode.isUnderConstruction();
      boolean isLastBlock = inode.getLastBlock() != null &&
          inode.getLastBlock().getBlockId() == block.getBlockId();
      // Must be the last block of a file under construction.
      if (!underConstruction) {
        rejectAddStoredBlock(
            new Block(block), dataNode,
            "Reported as block being written but is a block of closed file.",
            true, false);
        continue;
      }
      if (!isLastBlock) {
        rejectAddStoredBlock(
            new Block(block), dataNode,
            "Reported as block being written but not the last block of " +
            "an under-construction file.",
            true, false);
        continue;
      }
      INodeFileUnderConstruction pendingFile =
          (INodeFileUnderConstruction) inode;
      boolean added = pendingFile.addTarget(dataNode, block.getGenerationStamp());
      if (added) {
        // Increment only once for each datanode.
        DatanodeDescriptor[] validDNs = pendingFile.getValidTargets();
        if (validDNs != null) {
          incrementSafeBlockCount(validDNs.length, true);
        }
      }
    }
  } finally {
    writeUnlock();
    checkSafeMode();
  }
  return true;
}
Example 8: findOverReplicatedReplicas
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the package/class this method depends on
/**
 * Find how many of the containing nodes are "extra", if any.
 * If there are any extras, call chooseExcessReplicates() to
 * mark them in the excessReplicateMap.
 *
 * @param excessReplicateMapTmp replicas that can possibly be in excess
 * @param originalDatanodes all currently valid replicas of this block
 */
private void findOverReplicatedReplicas(Block block, short replication,
    DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint,
    List<DatanodeID> excessReplicateMapTmp,
    List<DatanodeID> originalDatanodes) {
  Collection<DatanodeDescriptor> nonExcess;
  INodeFile inode;
  readLock();
  try {
    BlockInfo storedBlock = blocksMap.getBlockInfo(block);
    inode = (storedBlock == null) ? null : storedBlock.getINode();
    if (inode == null) {
      return; // file has been deleted already, nothing to do
    }
    // If the caller did not specify the target replication factor of
    // the file, fetch it from the inode. This happens when invoked
    // by the ReplicationMonitor thread.
    if (replication < 0) {
      replication = inode.getBlockReplication(storedBlock);
    }
    if (addedNode == delNodeHint) {
      delNodeHint = null;
    }
    nonExcess = new ArrayList<DatanodeDescriptor>();
    Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(block);
    for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
        it.hasNext();) {
      DatanodeDescriptor cur = it.next();
      Collection<Block> excessBlocks = excessReplicateMap.get(cur.getStorageID());
      if (excessBlocks == null || !excessBlocks.contains(block)) {
        if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
          // exclude corrupt replicas
          if (corruptNodes == null || !corruptNodes.contains(cur)) {
            nonExcess.add(cur);
            originalDatanodes.add(cur);
          }
        }
      }
    }
  } finally {
    readUnlock();
  }
  // This can be called without the FSNamesystem lock because it does not
  // use any global data structures. Also, the inode is passed as-is to
  // the pluggable block placement policy.
  chooseExcessReplicates(nonExcess, block, replication,
      addedNode, delNodeHint, inode, excessReplicateMapTmp);
}
Example 9: removeStoredBlock
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the package/class this method depends on
/**
 * Modify the (block --> datanode) map. Possibly generate
 * replication tasks if the removed block is still valid.
 */
private void removeStoredBlock(Block block, DatanodeDescriptor node) {
  if (NameNode.stateChangeLog.isDebugEnabled()) {
    NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
        + block + " from " + node.getName());
  }
  if (!blocksMap.removeNode(block, node)) {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: "
          + block + " has already been removed from node " + node);
    }
    return;
  }
  //
  // If the file is being actively written to and this is its last block,
  // then do not check the replication factor here.
  //
  BlockInfo storedBlock = blocksMap.getStoredBlock(block);
  INodeFile fileINode = (storedBlock == null) ? null : storedBlock.getINode();
  if (fileINode != null &&
      fileINode.isUnderConstruction() && fileINode.isLastBlock(storedBlock)) {
    decrementSafeBlockCount(block);
    return;
  }
  //
  // It's possible that the block was removed because of a datanode
  // failure. If the block is still valid, check if replication is
  // necessary. In that case, put the block on a possibly-will-
  // be-replicated list.
  //
  if (fileINode != null) {
    decrementSafeBlockCount(block);
    // Handle under-replication.
    // Use storedBlock here because block may be a deleted block with size DELETED.
    if (isPopulatingReplQueuesInternal()) {
      NumberReplicas num = countNodes(storedBlock);
      int numCurrentReplicas = num.liveReplicas() +
          pendingReplications.getNumReplicas(storedBlock);
      updateNeededReplicationQueue(storedBlock, -1, numCurrentReplicas,
          num.decommissionedReplicas, node,
          fileINode.getBlockReplication(storedBlock));
    }
  }
  //
  // We've removed a block from a node, so it's definitely no longer
  // in "excess" there.
  //
  removeFromExcessReplicateMap(block, node);
  // Remove the replica from corruptReplicas
  corruptReplicas.removeFromCorruptReplicasMap(block, node);
}
Example 10: isRaidedBlock
import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo; // import the package/class this method depends on
private boolean isRaidedBlock(BlockInfo block) {
  INodeFile fileINode = block.getINode();
  return fileINode != null &&
      fileINode.getStorageType().equals(StorageType.RAID_STORAGE);
}