本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature.clearDiffs方法的典型用法代码示例。如果您正苦于以下问题:Java FileWithSnapshotFeature.clearDiffs方法的具体用法?Java FileWithSnapshotFeature.clearDiffs怎么用?Java FileWithSnapshotFeature.clearDiffs使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature
的用法示例。
在下文中一共展示了FileWithSnapshotFeature.clearDiffs方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: destroyAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; //导入方法依赖的package包/类
@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  // Hand every block over to the caller for deletion and detach each one
  // from this file, so the blocks map no longer points back here.
  if (collectedBlocks != null && blocks != null) {
    for (BlockInfoContiguous block : blocks) {
      collectedBlocks.addDeleteBlock(block);
      block.setBlockCollection(null);
    }
  }
  // Reset to the shared empty array rather than null.
  setBlocks(BlockInfoContiguous.EMPTY_ARRAY);
  // Release the reference-counted ACL feature, if one is attached.
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);

  // If this file participates in snapshots, also collect the blocks held
  // only by snapshot diffs, then drop the diff list itself.
  FileWithSnapshotFeature snapshotFeature = getFileWithSnapshotFeature();
  if (snapshotFeature != null) {
    snapshotFeature.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    snapshotFeature.clearDiffs();
  }
}
示例2: destroyAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; //导入方法依赖的package包/类
@Override
public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
  // TODO pass in the storage policy
  // Account for the quota freed by removing this file before clearing it.
  reclaimContext.quotaDelta().add(
      computeQuotaUsage(reclaimContext.bsps, false));
  clearFile(reclaimContext);

  // When the file is snapshotted, blocks referenced only by snapshot diffs
  // must be collected as well, after which the diffs can be discarded.
  FileWithSnapshotFeature snapshotFeature = getFileWithSnapshotFeature();
  if (snapshotFeature != null) {
    snapshotFeature.getDiffs()
        .destroyAndCollectSnapshotBlocks(reclaimContext.collectedBlocks);
    snapshotFeature.clearDiffs();
  }

  // Track under-construction files so leases can be cleaned up by the caller.
  if (isUnderConstruction() && reclaimContext.removedUCFiles != null) {
    reclaimContext.removedUCFiles.add(getId());
  }
}
示例3: destroyAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; //导入方法依赖的package包/类
@Override
public void destroyAndCollectBlocks(BlockStoragePolicySuite bsps,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  // Queue all of this file's blocks for deletion and sever their
  // back-references to this inode.
  if (collectedBlocks != null && blocks != null) {
    for (BlockInfoContiguous block : blocks) {
      collectedBlocks.addDeleteBlock(block);
      block.setBlockCollection(null);
    }
  }
  setBlocks(null);
  // Drop the ACL feature so its reference count is decremented.
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);

  // Snapshot diffs may hold blocks no longer in the current file view;
  // collect those too, then clear the diff list.
  FileWithSnapshotFeature snapshotFeature = getFileWithSnapshotFeature();
  if (snapshotFeature != null) {
    snapshotFeature.getDiffs().destroyAndCollectSnapshotBlocks(collectedBlocks);
    snapshotFeature.clearDiffs();
  }
}
示例4: destroyAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature; //导入方法依赖的package包/类
@Override
public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  // Hand each block to the caller for removal and detach it from this file.
  if (collectedBlocks != null && blocks != null) {
    for (BlockInfo block : blocks) {
      collectedBlocks.addDeleteBlock(block);
      block.setBlockCollection(null);
    }
  }
  setBlocks(null);
  clear();
  removedINodes.add(this);

  // Discard any remaining snapshot diffs for this file.
  FileWithSnapshotFeature snapshotFeature = getFileWithSnapshotFeature();
  if (snapshotFeature != null) {
    snapshotFeature.clearDiffs();
  }
}