This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext. If you are unsure what INode.ReclaimContext does or how to use it, the curated method examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.INode.
The following presents 15 code examples of the INode.ReclaimContext method, ordered by popularity by default.
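Before diving into the examples, here is a minimal caller-side sketch of how a ReclaimContext is typically assembled. This is not taken from the examples below: the constructor signature and helper calls are assumptions based on recent Hadoop releases (2.8+) and may differ across versions; fsd stands for a hypothetical FSDirectory instance.
import java.util.List;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.util.ChunkedArrayList;

// Accumulators that the deletion code fills in while walking the tree.
INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<>();

// Assumed constructor: (BlockStoragePolicySuite, BlocksMapUpdateInfo,
// List<INode>, List<Long>); the last argument collects the IDs of
// under-construction files and may be null when not needed.
INode.ReclaimContext reclaimContext = new INode.ReclaimContext(
    fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, null);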
Example 1: cleanFile
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
public void cleanFile(INode.ReclaimContext reclaimContext,
final INodeFile file, final int snapshotId, int priorSnapshotId,
byte storagePolicyId) {
if (snapshotId == Snapshot.CURRENT_STATE_ID) {
// delete the current file while the file has snapshot feature
if (!isCurrentFileDeleted()) {
file.recordModification(priorSnapshotId);
deleteCurrentFile();
}
final BlockStoragePolicy policy = reclaimContext.storagePolicySuite()
.getPolicy(storagePolicyId);
QuotaCounts old = file.storagespaceConsumed(policy);
collectBlocksAndClear(reclaimContext, file);
QuotaCounts current = file.storagespaceConsumed(policy);
reclaimContext.quotaDelta().add(old.subtract(current));
} else { // delete the snapshot
priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
diffs.deleteSnapshotDiff(reclaimContext, snapshotId, priorSnapshotId,
file);
}
}
Example 2: collectBlocksAndClear
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
/**
* If some blocks at the end of the block list no longer belong to
* any inode, collect them and update the block list.
*/
public void collectBlocksAndClear(
INode.ReclaimContext reclaimContext, final INodeFile file) {
// check if everything is deleted.
if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
file.clearFile(reclaimContext);
return;
}
// find max file size.
final long max;
FileDiff diff = getDiffs().getLast();
if (isCurrentFileDeleted()) {
max = diff == null ? 0 : diff.getFileSize();
} else {
max = file.computeFileSize();
}
// Collect blocks that should be deleted
FileDiff last = diffs.getLast();
BlockInfo[] snapshotBlocks = last == null ? null : last.getBlocks();
if (snapshotBlocks == null) {
  file.collectBlocksBeyondMax(max, reclaimContext.collectedBlocks());
} else {
  file.collectBlocksBeyondSnapshot(snapshotBlocks,
      reclaimContext.collectedBlocks());
}
}
Example 3: removeSnapshot
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
/**
* Remove the snapshot with the given name from {@link #snapshotsByNames},
* and delete all the corresponding DirectoryDiff.
*
* @param reclaimContext records blocks and inodes that need to be reclaimed
* @param snapshotRoot The directory where we take snapshots
* @param snapshotName The name of the snapshot to be removed
* @return The removed snapshot. Null if no snapshot with the given name
* exists.
*/
public Snapshot removeSnapshot(
INode.ReclaimContext reclaimContext, INodeDirectory snapshotRoot,
String snapshotName) throws SnapshotException {
final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
if (i < 0) {
throw new SnapshotException("Cannot delete snapshot " + snapshotName
+ " from path " + snapshotRoot.getFullPathName()
+ ": the snapshot does not exist.");
} else {
final Snapshot snapshot = snapshotsByNames.get(i);
int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
snapshotRoot.cleanSubtree(reclaimContext, snapshot.getId(), prior);
// remove from snapshotsByNames after successfully cleaning the subtree
snapshotsByNames.remove(i);
return snapshot;
}
}
Example 4: deleteSnapshot
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
/**
* Delete a snapshot for a snapshottable directory
* @param snapshotName Name of the snapshot to be deleted
* @param reclaimContext Used to collect information to reclaim blocks
* and inodes
*/
public void deleteSnapshot(final INodesInPath iip, final String snapshotName,
INode.ReclaimContext reclaimContext) throws IOException {
INodeDirectory srcRoot = getSnapshottableRoot(iip);
srcRoot.removeSnapshot(reclaimContext, snapshotName);
numSnapshots.getAndDecrement();
}
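For context, a hedged sketch of what a caller does after deleteSnapshot returns: the blocks and inodes recorded in the context still have to be released. The two method names below exist on FSNamesystem and FSDirectory in recent Hadoop releases, but treat them as assumptions here; fsn and fsd are hypothetical instances.
// Apply the side effects accumulated in the ReclaimContext.
fsn.removeBlocksAndUpdateSafemodeTotal(collectedBlocks); // fsn: FSNamesystem
fsd.removeFromInodeMap(removedINodes);                   // fsd: FSDirectory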
Example 5: combinePosteriorAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
@Override
void combinePosteriorAndCollectBlocks(
INode.ReclaimContext reclaimContext, INodeFile currentINode,
FileDiff posterior) {
FileWithSnapshotFeature sf = currentINode.getFileWithSnapshotFeature();
assert sf != null : "FileWithSnapshotFeature is null";
sf.updateQuotaAndCollectBlocks(reclaimContext, currentINode, posterior);
}
Example 6: deleteSnapshotDiff
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
/**
* Delete a snapshot. The synchronization of the diff list will be done
* outside. If the diff to remove is not the first one in the diff list, we
* need to combine the diff with its previous one.
*
* @param reclaimContext blocks and inodes that need to be reclaimed
* @param snapshot The id of the snapshot to be deleted
* @param prior The id of the snapshot taken before the to-be-deleted snapshot
* @param currentINode the inode where the snapshot diff is deleted
*/
public final void deleteSnapshotDiff(INode.ReclaimContext reclaimContext,
final int snapshot, final int prior, final N currentINode) {
int snapshotIndex = Collections.binarySearch(diffs, snapshot);
D removed;
if (snapshotIndex == 0) {
if (prior != Snapshot.NO_SNAPSHOT_ID) { // there is still a snapshot before
// set the snapshot to latestBefore
diffs.get(snapshotIndex).setSnapshotId(prior);
} else { // there is no snapshot before
removed = diffs.remove(0);
removed.destroyDiffAndCollectBlocks(reclaimContext, currentINode);
}
} else if (snapshotIndex > 0) {
final AbstractINodeDiff<N, A, D> previous = diffs.get(snapshotIndex - 1);
if (previous.getSnapshotId() != prior) {
diffs.get(snapshotIndex).setSnapshotId(prior);
} else {
// combine the to-be-removed diff with its previous diff
removed = diffs.remove(snapshotIndex);
if (previous.snapshotINode == null) {
previous.snapshotINode = removed.snapshotINode;
}
previous.combinePosteriorAndCollectBlocks(reclaimContext, currentINode,
removed);
previous.setPosterior(removed.getPosterior());
removed.setPosterior(null);
}
}
}
Example 7: destroyCreatedList
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
/** clear the created list */
private void destroyCreatedList(INode.ReclaimContext reclaimContext,
final INodeDirectory currentINode) {
final List<INode> createdList = getList(ListType.CREATED);
for (INode c : createdList) {
c.destroyAndCollectBlocks(reclaimContext);
// c should be contained in the children list, remove it
currentINode.removeChild(c);
}
createdList.clear();
}
Example 8: destroyDeletedList
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
/** clear the deleted list */
private void destroyDeletedList(INode.ReclaimContext reclaimContext) {
final List<INode> deletedList = getList(ListType.DELETED);
for (INode d : deletedList) {
d.destroyAndCollectBlocks(reclaimContext);
}
deletedList.clear();
}
Example 9: combinePosteriorAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
@Override
void combinePosteriorAndCollectBlocks(
final INode.ReclaimContext reclaimContext,
final INodeDirectory currentDir,
final DirectoryDiff posterior) {
diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
/** Collect blocks for deleted files. */
@Override
public void process(INode inode) {
if (inode != null) {
inode.destroyAndCollectBlocks(reclaimContext);
}
}
});
}
Example 10: destroyDiffAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
@Override
void destroyDiffAndCollectBlocks(
INode.ReclaimContext reclaimContext, INodeDirectory currentINode) {
// this diff has been deleted
diff.destroyDeletedList(reclaimContext);
INodeDirectoryAttributes snapshotINode = getSnapshotINode();
if (snapshotINode != null && snapshotINode.getAclFeature() != null) {
AclStorage.removeAclFeature(snapshotINode.getAclFeature());
}
}
Example 11: clear
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
public void clear(
INode.ReclaimContext reclaimContext, INodeDirectory currentINode) {
// destroy its diff list
for (DirectoryDiff diff : diffs) {
diff.destroyDiffAndCollectBlocks(reclaimContext, currentINode);
}
diffs.clear();
}
Example 12: combinePosteriorAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
/** Combine the posterior diff and collect blocks for deletion. */
abstract void combinePosteriorAndCollectBlocks(
INode.ReclaimContext reclaimContext, final N currentINode,
final D posterior);
Example 13: combineAndCollectSnapshotBlocks
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
/**
* Copy blocks from the removed snapshot into the previous snapshot
* up to the file length of the latter.
* Collect unused blocks of the removed snapshot.
*/
void combineAndCollectSnapshotBlocks(
INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
BlockInfo[] removedBlocks = removed.getBlocks();
if (removedBlocks == null) {
FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
assert sf != null : "FileWithSnapshotFeature is null";
if (sf.isCurrentFileDeleted()) {
  sf.collectBlocksAndClear(reclaimContext, file);
}
return;
}
int p = getPrior(removed.getSnapshotId(), true);
FileDiff earlierDiff = p == Snapshot.NO_SNAPSHOT_ID ? null : getDiffById(p);
// Copy blocks to the previous snapshot if not set already
if (earlierDiff != null) {
earlierDiff.setBlocks(removedBlocks);
}
BlockInfo[] earlierBlocks =
(earlierDiff == null ? new BlockInfoContiguous[]{} : earlierDiff.getBlocks());
// Find later snapshot (or file itself) with blocks
BlockInfo[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
laterBlocks = (laterBlocks == null) ? file.getBlocks() : laterBlocks;
// Skip blocks, which belong to either the earlier or the later lists
int i = 0;
for (; i < removedBlocks.length; i++) {
  if (i < earlierBlocks.length && removedBlocks[i] == earlierBlocks[i]) {
    continue;
  }
  if (i < laterBlocks.length && removedBlocks[i] == laterBlocks[i]) {
    continue;
  }
  break;
}
// Check if last block is part of truncate recovery
BlockInfo lastBlock = file.getLastBlock();
Block dontRemoveBlock = null;
if (lastBlock != null && lastBlock.getBlockUCState().equals(
HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
dontRemoveBlock = lastBlock.getUnderConstructionFeature()
.getTruncateBlock();
}
// Collect the remaining blocks of the file, ignoring truncate block
for (; i < removedBlocks.length; i++) {
  if (dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
reclaimContext.collectedBlocks().addDeleteBlock(removedBlocks[i]);
}
}
}
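The skip loop in the middle of this method is easy to misread, so here is a self-contained restatement of the same logic with hypothetical names; it compiles on plain arrays and can be unit-tested in isolation. Note that identity comparison (==) is intentional: a block is kept only when the very same BlockInfo object sits at the same position in the earlier or later list.
// Returns the first index from which blocks belong only to the removed
// snapshot and can therefore be collected for deletion.
static int firstReclaimableIndex(Object[] removed, Object[] earlier,
    Object[] later) {
  for (int i = 0; i < removed.length; i++) {
    boolean sharedWithEarlier = i < earlier.length && removed[i] == earlier[i];
    boolean sharedWithLater = i < later.length && removed[i] == later[i];
    if (!sharedWithEarlier && !sharedWithLater) {
      return i; // first block owned solely by the removed snapshot
    }
  }
  return removed.length; // every block is shared; nothing to reclaim
}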
Example 14: destroyDiffAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
@Override
void destroyDiffAndCollectBlocks(INode.ReclaimContext reclaimContext,
INodeFile currentINode) {
currentINode.getFileWithSnapshotFeature().updateQuotaAndCollectBlocks(
reclaimContext, currentINode, this);
}
Example 15: destroyDstSubtree
import org.apache.hadoop.hdfs.server.namenode.INode; // import the package/class this method depends on
/**
* Destroy a subtree under a DstReference node.
*/
public static void destroyDstSubtree(INode.ReclaimContext reclaimContext,
INode inode, final int snapshot, final int prior) {
Preconditions.checkArgument(prior != NO_SNAPSHOT_ID);
if (inode.isReference()) {
if (inode instanceof INodeReference.WithName
&& snapshot != Snapshot.CURRENT_STATE_ID) {
// this inode has been renamed before the deletion of the DstReference
// subtree
inode.cleanSubtree(reclaimContext, snapshot, prior);
} else {
// for DstReference node, continue this process to its subtree
destroyDstSubtree(reclaimContext,
inode.asReference().getReferredINode(), snapshot, prior);
}
} else if (inode.isFile()) {
inode.cleanSubtree(reclaimContext, snapshot, prior);
} else if (inode.isDirectory()) {
Map<INode, INode> excludedNodes = null;
INodeDirectory dir = inode.asDirectory();
DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
if (sf != null) {
DirectoryDiffList diffList = sf.getDiffs();
DirectoryDiff priorDiff = diffList.getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
excludedNodes = cloneDiffList(dList);
}
if (snapshot != Snapshot.CURRENT_STATE_ID) {
diffList.deleteSnapshotDiff(reclaimContext,
snapshot, prior, dir);
}
priorDiff = diffList.getDiffById(prior);
if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
priorDiff.diff.destroyCreatedList(reclaimContext, dir);
}
}
for (INode child : inode.asDirectory().getChildrenList(prior)) {
if (excludedNodes != null && excludedNodes.containsKey(child)) {
continue;
}
destroyDstSubtree(reclaimContext, child, snapshot, prior);
}
}
}