This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.DatanodeUtil.idToBlockDir. If you are wondering what DatanodeUtil.idToBlockDir does and how to call it, the curated code examples below should help; you can also read up on the enclosing class, org.apache.hadoop.hdfs.server.datanode.DatanodeUtil.
The following presents 14 code examples of DatanodeUtil.idToBlockDir, ordered by popularity.
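Before diving into the examples, it helps to know what idToBlockDir computes: it maps a block ID to a two-level subdirectory tree under a given root, so block files are spread across a bounded number of directories instead of one huge flat directory. Below is a minimal, standalone sketch modeled on the Hadoop 2.x layout; the exact bit masks and the "subdir" prefix are assumptions that may differ between Hadoop versions.

import java.io.File;

// Sketch of the idToBlockDir mapping (assumed Hadoop 2.x layout):
// two directory levels are derived from bits 16-20 and 8-12 of the
// block ID, giving at most 32 x 32 leaf directories per root.
public final class IdToBlockDirSketch {
  public static File idToBlockDir(File root, long blockId) {
    int d1 = (int) ((blockId >> 16) & 0x1F);
    int d2 = (int) ((blockId >> 8) & 0x1F);
    return new File(root, "subdir" + d1 + File.separator + "subdir" + d2);
  }

  public static void main(String[] args) {
    // Prints something like /data/dn/finalized/subdir0/subdir0,
    // depending on the bits of the block ID.
    System.out.println(idToBlockDir(new File("/data/dn/finalized"), 1073741825L));
  }
}

Every example below follows the same pattern: compute the block directory from a root (finalized, tmp, or lazypersist) and a block ID, then resolve block and meta file names inside it.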
Example 1: verifyBlockDeletedFromDir
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
protected final boolean verifyBlockDeletedFromDir(File dir,
    LocatedBlocks locatedBlocks) {
  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File targetDir =
        DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());
    File blockFile = new File(targetDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
          " exists after deletion.");
      return false;
    }
    File metaFile = new File(targetDir,
        DatanodeUtil.getMetaName(lb.getBlock().getBlockName(),
            lb.getBlock().getGenerationStamp()));
    if (metaFile.exists()) {
      LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
          " exists after deletion.");
      return false;
    }
  }
  return true;
}
Example 2: copyBlockFiles
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
/**
 * Copy the block and meta files for the given block to the given destination.
 * @return the new meta and block files.
 * @throws IOException
 */
static File[] copyBlockFiles(long blockId, long genStamp,
    File srcMeta, File srcFile, File destRoot)
    throws IOException {
  final File destDir = DatanodeUtil.idToBlockDir(destRoot, blockId);
  final File dstFile = new File(destDir, srcFile.getName());
  final File dstMeta = FsDatasetUtil.getMetaFile(dstFile, genStamp);
  computeChecksum(srcMeta, dstMeta, srcFile);
  try {
    Storage.nativeCopyFileUnbuffered(srcFile, dstFile, true);
  } catch (IOException e) {
    throw new IOException("Failed to copy " + srcFile + " to " + dstFile, e);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copied " + srcMeta + " to " + dstMeta +
        " and calculated checksum");
    LOG.debug("Copied " + srcFile + " to " + dstFile);
  }
  return new File[] {dstMeta, dstFile};
}
Example 3: copyBlockFiles
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
/**
 * Copy the block and meta files for the given block to the given destination.
 * @return the new meta and block files.
 * @throws IOException
 */
static File[] copyBlockFiles(long blockId, long genStamp, File srcMeta,
    File srcFile, File destRoot, boolean calculateChecksum)
    throws IOException {
  final File destDir = DatanodeUtil.idToBlockDir(destRoot, blockId);
  final File dstFile = new File(destDir, srcFile.getName());
  final File dstMeta = FsDatasetUtil.getMetaFile(dstFile, genStamp);
  return copyBlockFiles(srcMeta, srcFile, dstMeta, dstFile, calculateChecksum);
}
Example 4: copyReplicaWithNewBlockIdAndGS
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
private File[] copyReplicaWithNewBlockIdAndGS(
    ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
    throws IOException {
  String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
  FsVolumeReference v = volumes.getNextVolume(
      replicaInfo.getVolume().getStorageType(), replicaInfo.getNumBytes());
  final File tmpDir = ((FsVolumeImpl) v.getVolume())
      .getBlockPoolSlice(bpid).getTmpDir();
  final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
  final File dstBlockFile = new File(destDir, blockFileName);
  final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
  return copyBlockFiles(replicaInfo.getMetaFile(), replicaInfo.getBlockFile(),
      dstMetaFile, dstBlockFile, true);
}
Example 5: addBlock
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
File addBlock(Block b, File f) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  if (!blockDir.exists()) {
    if (!blockDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + blockDir);
    }
  }
  File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
  File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
  dfsUsage.incDfsUsed(b.getNumBytes() + metaFile.length());
  return blockFile;
}
Example 6: activateSavedReplica
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
/**
 * Move a persisted replica from the lazypersist directory to a subdirectory
 * under finalized.
 */
File activateSavedReplica(Block b, File metaFile, File blockFile)
    throws IOException {
  final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  final File targetBlockFile = new File(blockDir, blockFile.getName());
  final File targetMetaFile = new File(blockDir, metaFile.getName());
  FileUtils.moveFile(blockFile, targetBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
  FileUtils.moveFile(metaFile, targetMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);
  return targetBlockFile;
}
Example 7: ensureLazyPersistBlocksAreSaved
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
/**
 * Make sure at least one non-transient volume has a saved copy of the replica.
 * An infinite loop is used to ensure the async lazy persist tasks have
 * completely finished before verification; the caller of
 * ensureLazyPersistBlocksAreSaved expects either a successful pass or a
 * timeout failure.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
      cluster.getDataNodes().get(0).getFSDataset().getVolumes();
  final Set<Long> persistedBlockIds = new HashSet<Long>();
  while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks().size()) {
    // Sleep for 1 second before each verification iteration
    Thread.sleep(1000);
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (FsVolumeSpi v : volumes) {
        if (v.isTransientStorage()) {
          continue;
        }
        FsVolumeImpl volume = (FsVolumeImpl) v;
        File lazyPersistDir = volume.getBlockPoolSlice(bpid).getLazypersistDir();
        long blockId = lb.getBlock().getBlockId();
        File targetDir =
            DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
        File blockFile = new File(targetDir, lb.getBlock().getBlockName());
        if (blockFile.exists()) {
          // Found a persisted copy for this block; record it
          persistedBlockIds.add(blockId);
        }
      }
    }
  }
  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(),
      is(locatedBlocks.getLocatedBlocks().size()));
}
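Note that ensureLazyPersistBlocksAreSaved loops indefinitely until every block has a persisted copy, so the caller must bound it with the test runner's timeout. A hypothetical JUnit 4 caller might look like the sketch below; the test name is illustrative, while makeTestFile and ensureFileReplicasOnStorageType are the helpers used elsewhere on this page (see Example 12).

// Hypothetical caller (sketch): JUnit's timeout is what turns the
// helper's infinite loop into a bounded pass/fail verification.
@Test(timeout = 300000)
public void testReplicasEventuallyPersisted() throws Exception {
  Path path = new Path("/lazyPersistCaller.dat");
  makeTestFile(path, BLOCK_SIZE, true);                // create a LAZY_PERSIST file
  LocatedBlocks blocks =
      ensureFileReplicasOnStorageType(path, RAM_DISK); // replicas start on RAM disk
  ensureLazyPersistBlocksAreSaved(blocks);             // blocks until saved, or times out
}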
Example 8: copyBlockFiles
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
/**
 * Copy the block and meta files for the given block to the given destination.
 * @return the new meta and block files.
 * @throws IOException
 */
static File[] copyBlockFiles(long blockId, long genStamp, File srcMeta,
    File srcFile, File destRoot, boolean calculateChecksum,
    int smallBufferSize, final Configuration conf) throws IOException {
  final File destDir = DatanodeUtil.idToBlockDir(destRoot, blockId);
  final File dstFile = new File(destDir, srcFile.getName());
  final File dstMeta = FsDatasetUtil.getMetaFile(dstFile, genStamp);
  return copyBlockFiles(srcMeta, srcFile, dstMeta, dstFile, calculateChecksum,
      smallBufferSize, conf);
}
Example 9: copyReplicaWithNewBlockIdAndGS
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
private File[] copyReplicaWithNewBlockIdAndGS(
    ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
    throws IOException {
  String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
  FsVolumeImpl v = (FsVolumeImpl) replicaInfo.getVolume();
  final File tmpDir = v.getBlockPoolSlice(bpid).getTmpDir();
  final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
  final File dstBlockFile = new File(destDir, blockFileName);
  final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
  return copyBlockFiles(replicaInfo.getMetaFile(),
      replicaInfo.getBlockFile(),
      dstMetaFile, dstBlockFile, true, smallBufferSize, conf);
}
Example 10: addFinalizedBlock
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
File addFinalizedBlock(Block b, File f) throws IOException {
  File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
  if (!blockDir.exists()) {
    if (!blockDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + blockDir);
    }
  }
  File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
  File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
  dfsUsage.incDfsUsed(b.getNumBytes() + metaFile.length());
  return blockFile;
}
Example 11: ensureLazyPersistBlocksAreSaved
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
/**
 * Make sure at least one non-transient volume has a saved copy of the replica.
 * An infinite loop is used to ensure the async lazy persist tasks have
 * completely finished before verification; the caller of
 * ensureLazyPersistBlocksAreSaved expects either a successful pass or a
 * timeout failure.
 */
protected final void ensureLazyPersistBlocksAreSaved(
    LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  final Set<Long> persistedBlockIds = new HashSet<Long>();
  try (FsDatasetSpi.FsVolumeReferences volumes =
      cluster.getDataNodes().get(0).getFSDataset().getFsVolumeReferences()) {
    while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks()
        .size()) {
      // Sleep for 1 second before each verification iteration
      Thread.sleep(1000);
      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
        for (FsVolumeSpi v : volumes) {
          if (v.isTransientStorage()) {
            continue;
          }
          FsVolumeImpl volume = (FsVolumeImpl) v;
          File lazyPersistDir =
              volume.getBlockPoolSlice(bpid).getLazypersistDir();
          long blockId = lb.getBlock().getBlockId();
          File targetDir =
              DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
          File blockFile = new File(targetDir, lb.getBlock().getBlockName());
          if (blockFile.exists()) {
            // Found a persisted copy for this block; record it
            persistedBlockIds.add(blockId);
          }
        }
      }
    }
  }
  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(),
      is(locatedBlocks.getLocatedBlocks().size()));
}
Example 12: testLazyPersistBlocksAreSaved
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
@Test
public void testLazyPersistBlocksAreSaved()
    throws IOException, InterruptedException {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");
  // Create a test file
  makeTestFile(path, BLOCK_SIZE * 10, true);
  LocatedBlocks locatedBlocks = ensureFileReplicasOnStorageType(path, RAM_DISK);
  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);
  LOG.info("Verifying copy was saved to lazyPersist/");
  // Make sure that there is a saved copy of the replica on persistent
  // storage.
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
      cluster.getDataNodes().get(0).getFSDataset().getVolumes();
  final Set<Long> persistedBlockIds = new HashSet<Long>();
  // Make sure at least one non-transient volume has a saved copy of
  // the replica.
  for (FsVolumeSpi v : volumes) {
    if (v.isTransientStorage()) {
      continue;
    }
    FsVolumeImpl volume = (FsVolumeImpl) v;
    File lazyPersistDir = volume.getBlockPoolSlice(bpid).getLazypersistDir();
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      File targetDir =
          DatanodeUtil.idToBlockDir(lazyPersistDir, lb.getBlock().getBlockId());
      File blockFile = new File(targetDir, lb.getBlock().getBlockName());
      if (blockFile.exists()) {
        // Found a persisted copy for this block!
        boolean added = persistedBlockIds.add(lb.getBlock().getBlockId());
        assertThat(added, is(true));
      } else {
        LOG.error(blockFile + " not found");
      }
    }
  }
  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(),
      is(locatedBlocks.getLocatedBlocks().size()));
}
Example 13: getBlockFile
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
/**
 * Get the file corresponding to a block.
 * @param storageDir storage directory
 * @param blk the block
 * @return data file corresponding to the block
 */
public static File getBlockFile(File storageDir, ExtendedBlock blk) {
  return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
      blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName());
}
Example 14: getBlockMetadataFile
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; // import the package/class the method depends on
/**
 * Get the latest metadata file corresponding to a block.
 * @param storageDir storage directory
 * @param blk the block
 * @return metadata file corresponding to the block
 */
public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) {
  return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
      blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName() + "_" +
      blk.getGenerationStamp() + Block.METADATA_EXTENSION);
}