本文整理汇总了Java中org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction.setGenerationStamp方法的典型用法代码示例。如果您正苦于以下问题:Java BlockInfoUnderConstruction.setGenerationStamp方法的具体用法?Java BlockInfoUnderConstruction.setGenerationStamp怎么用?Java BlockInfoUnderConstruction.setGenerationStamp使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction的用法示例。
在下文中一共展示了BlockInfoUnderConstruction.setGenerationStamp方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: makeNameSystemSpy
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; //导入方法依赖的package包/类
/**
 * Builds a Mockito spy of {@link FSNamesystem} backed by a fresh FSImage,
 * with a single under-construction block attached to {@code file} and the
 * commit/persist paths stubbed out so tests never touch real storage.
 *
 * @param block the block to wrap in an under-construction BlockInfo
 * @param file  the under-construction file the block is attached to
 * @return a spy whose getStoredBlock/closeFileCommitBlocks/persistBlocks/
 *         getEditLog calls are stubbed
 * @throws IOException if FSImage or FSNamesystem construction fails
 */
private FSNamesystem makeNameSystemSpy(Block block,
    INodeFileUnderConstruction file)
    throws IOException {
  final Configuration config = new Configuration();
  final FSImage fsImage = new FSImage(config);
  final DatanodeDescriptor[] noTargets = new DatanodeDescriptor[0];
  final FSNamesystem spiedNs = spy(new FSNamesystem(config, fsImage));

  // Wire the UC block to the file and stamp it with the shared genStamp.
  final BlockInfoUnderConstruction ucBlock = new BlockInfoUnderConstruction(
      block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
      noTargets);
  ucBlock.setBlockCollection(file);
  ucBlock.setGenerationStamp(genStamp);
  ucBlock.initializeBlockRecovery(genStamp);

  // Stub everything that would otherwise hit real namesystem/edit-log state.
  doReturn(true).when(file).removeLastBlock(any(Block.class));
  doReturn(ucBlock).when(spiedNs).getStoredBlock(any(Block.class));
  doReturn("").when(spiedNs).closeFileCommitBlocks(
      any(INodeFileUnderConstruction.class),
      any(BlockInfo.class));
  doReturn("").when(spiedNs).persistBlocks(
      any(INodeFileUnderConstruction.class), anyBoolean());
  doReturn(mock(FSEditLog.class)).when(spiedNs).getEditLog();
  return spiedNs;
}
示例2: makeNameSystemSpy
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; //导入方法依赖的package包/类
/**
 * Builds a Mockito spy of {@link FSNamesystem} holding one
 * under-construction block attached to {@code file}, with the
 * commit/persist/edit-log interactions stubbed so no real I/O happens.
 *
 * @param block the block to wrap in an under-construction BlockInfo
 * @param file  the (mocked) file the block belongs to
 * @return a stubbed FSNamesystem spy ready for commit-block tests
 * @throws IOException if FSImage or FSNamesystem construction fails
 */
private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
    throws IOException {
  final Configuration config = new Configuration();
  final FSImage fsImage = new FSImage(config);
  final DatanodeStorageInfo[] noStorages = {};
  final FSNamesystem spiedNs = spy(new FSNamesystem(config, fsImage));

  // Wire the UC block to the file and stamp it with the shared genStamp.
  final BlockInfoUnderConstruction ucBlock = new BlockInfoUnderConstruction(
      block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
      noStorages);
  ucBlock.setBlockCollection(file);
  ucBlock.setGenerationStamp(genStamp);
  ucBlock.initializeBlockRecovery(genStamp);

  // Stub out file state queries and namesystem persistence paths.
  doReturn(true).when(file).removeLastBlock(any(Block.class));
  doReturn(true).when(file).isUnderConstruction();
  doReturn(ucBlock).when(spiedNs).getStoredBlock(any(Block.class));
  doReturn("").when(spiedNs).closeFileCommitBlocks(
      any(INodeFile.class), any(BlockInfo.class));
  doReturn("").when(spiedNs).persistBlocks(
      any(INodeFile.class), anyBoolean());
  doReturn(mock(FSEditLog.class)).when(spiedNs).getEditLog();
  return spiedNs;
}
示例3: makeNameSystemSpy
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; //导入方法依赖的package包/类
/**
 * Builds a Mockito spy of {@link FSNamesystem} that treats {@code file} as
 * live (parented + registered in the inode map, image marked loaded) and
 * holds one under-construction block for it, with commit paths stubbed.
 *
 * @param block the block to wrap in an under-construction BlockInfo
 * @param file  the (mocked) file the block belongs to
 * @return a stubbed FSNamesystem spy ready for commit-block tests
 * @throws IOException if FSImage or FSNamesystem construction fails
 */
private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
    throws IOException {
  final Configuration config = new Configuration();
  final FSImage fsImage = new FSImage(config);
  final DatanodeStorageInfo[] noStorages = {};
  final FSNamesystem realNs = new FSNamesystem(config, fsImage);
  realNs.setImageLoaded(true);

  // Give the file a parent and register it in the inode map so
  // FSNamesystem.isFileDeleted() reports it as present.
  if (file.getParent() == null) {
    final INodeDirectory mockDir = mock(INodeDirectory.class);
    final INodeDirectory realDir = new INodeDirectory(mockDir.getId(),
        new byte[0], mockDir.getPermissionStatus(), mockDir.getAccessTime());
    realDir.setLocalName(new byte[0]);
    realDir.addChild(file);
    file.setParent(realDir);
  }
  realNs.dir.getINodeMap().put(file);

  // Spy only after the namesystem state above is fully wired.
  final FSNamesystem spiedNs = spy(realNs);

  // Wire the UC block to the file and stamp it with the shared genStamp.
  final BlockInfoUnderConstruction ucBlock = new BlockInfoUnderConstruction(
      block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
      noStorages);
  ucBlock.setBlockCollection(file);
  ucBlock.setGenerationStamp(genStamp);
  ucBlock.initializeBlockRecovery(genStamp);

  // Stub out file state queries and the commit/edit-log paths.
  doReturn(true).when(file).removeLastBlock(any(Block.class));
  doReturn(true).when(file).isUnderConstruction();
  doReturn(ucBlock).when(spiedNs).getStoredBlock(any(Block.class));
  doReturn("").when(spiedNs).closeFileCommitBlocks(
      any(INodeFile.class), any(BlockInfo.class));
  doReturn(mock(FSEditLog.class)).when(spiedNs).getEditLog();
  return spiedNs;
}
示例4: makeNameSystemSpy
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; //导入方法依赖的package包/类
/**
 * Builds a Mockito spy of {@link FSNamesystem} that treats {@code file} as
 * live (mock parent attached, inode registered, image marked loaded) and
 * holds one under-construction block for it, with commit paths stubbed.
 *
 * @param block the block to wrap in an under-construction BlockInfo
 * @param file  the (mocked) file the block belongs to
 * @return a stubbed FSNamesystem spy ready for commit-block tests
 * @throws IOException if FSImage or FSNamesystem construction fails
 */
private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
    throws IOException {
  final Configuration config = new Configuration();
  final FSImage fsImage = new FSImage(config);
  final DatanodeStorageInfo[] noStorages = {};
  final FSNamesystem realNs = new FSNamesystem(config, fsImage);
  realNs.setImageLoaded(true);

  // Give the file a (mock) parent and register it in the inode map so
  // FSNamesystem.isFileDeleted() reports it as present.
  if (file.getParent() == null) {
    final INodeDirectory mockDir = mock(INodeDirectory.class);
    mockDir.setLocalName(new byte[0]);
    mockDir.addChild(file);
    file.setParent(mockDir);
  }
  realNs.dir.getINodeMap().put(file);

  // Spy only after the namesystem state above is fully wired.
  final FSNamesystem spiedNs = spy(realNs);

  // Wire the UC block to the file and stamp it with the shared genStamp.
  final BlockInfoUnderConstruction ucBlock = new BlockInfoUnderConstruction(
      block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
      noStorages);
  ucBlock.setBlockCollection(file);
  ucBlock.setGenerationStamp(genStamp);
  ucBlock.initializeBlockRecovery(genStamp);

  // Stub out file state queries and the commit/edit-log paths.
  doReturn(true).when(file).removeLastBlock(any(Block.class));
  doReturn(true).when(file).isUnderConstruction();
  doReturn(ucBlock).when(spiedNs).getStoredBlock(any(Block.class));
  doReturn("").when(spiedNs).closeFileCommitBlocks(
      any(INodeFile.class), any(BlockInfo.class));
  doReturn(mock(FSEditLog.class)).when(spiedNs).getEditLog();
  return spiedNs;
}
示例5: updatePipelineInternal
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; //导入方法依赖的package包/类
/**
 * Applies a client-reported pipeline update to the last (under-construction)
 * block of the file leased by {@code clientName}: validates the lease and
 * block, rejects stale updates, then replaces the block's generation stamp,
 * length, and expected replica locations with the new values.
 *
 * @param clientName lease holder performing the update
 * @param oldBlock   the block as currently known to the namesystem
 * @param newBlock   the replacement state (must have a newer generation
 *                   stamp and a length &gt;= the current one)
 * @param newNodes   datanodes forming the new pipeline
 * @throws IOException if the lease/block check fails or the update would
 *                     move the block to an older state
 * @see #updatePipeline(String, ExtendedBlock, ExtendedBlock, DatanodeID[])
 */
private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock,
ExtendedBlock newBlock, DatanodeID[] newNodes)
throws IOException {
// check the validity of the block and lease holder name
final INodeFileUnderConstruction pendingFile =
checkUCBlock(oldBlock, clientName);
pendingFile.updateLastTwoBlocks(leaseManager.getLease(clientName));
// The pipeline update always targets the file's last block, which is
// still under construction at this point.
final BlockInfoUnderConstruction blockInfo =
(BlockInfoUnderConstruction) pendingFile.getLastBlock();
// check new GS & length: this is not expected
// Reject updates that would roll the block back to an older generation
// stamp or a shorter length.
if (newBlock.getGenerationStamp() <= blockInfo.getGenerationStamp() ||
newBlock.getNumBytes() < blockInfo.getNumBytes()) {
String msg = "Update " + oldBlock + " (len = " +
blockInfo.getNumBytes() + ") to an older state: " + newBlock +
" (len = " + newBlock.getNumBytes() + ")";
LOG.warn(msg);
throw new IOException(msg);
}
// Make sure the hashes are corrected to avoid leaving stale replicas
// behind. This must run BEFORE the generation stamp below is overwritten,
// while oldBlock still matches the replicas' recorded state.
for (DatanodeDescriptor oldLocation :
blockInfo.getDatanodes(getBlockManager().getDatanodeManager())){
HashBuckets.getInstance().undoHash(oldLocation.getSId(),
HdfsServerConstants.ReplicaState.FINALIZED, oldBlock.getLocalBlock());
}
// Update old block with the new generation stamp and new length
blockInfo.setGenerationStamp(newBlock.getGenerationStamp());
blockInfo.setNumBytes(newBlock.getNumBytes());
pendingFile.recomputeFileSize();
// find the DatanodeDescriptor objects
// Resolve the new pipeline's DatanodeIDs to descriptors; stays null when
// the client reported an empty pipeline.
final DatanodeManager dm = getBlockManager().getDatanodeManager();
DatanodeDescriptor[] descriptors = null;
if (newNodes.length > 0) {
descriptors = new DatanodeDescriptor[newNodes.length];
for (int i = 0; i < newNodes.length; i++) {
descriptors[i] = dm.getDatanode(newNodes[i]);
}
}
blockInfo.setExpectedLocations(descriptors);
}