This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.INodeFile.recordModification. If you are unsure what INodeFile.recordModification does, how to call it, or what real uses look like, the method-level code examples selected here may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.INodeFile.
The following presents 4 code examples of the INodeFile.recordModification method.
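All four examples below follow the same pattern: when the live (current) file is deleted while it still carries the snapshot feature, recordModification(priorSnapshotId) is called first so the file's state is saved into the prior snapshot's diff before the current file is marked deleted; when a snapshot itself is deleted, the corresponding snapshot diff is removed instead. The following is a minimal, self-contained sketch of that decision flow only; SnapshotAware, CleanFileSketch, and the sentinel value are hypothetical stand-ins for illustration, not HDFS classes.
// Hypothetical stand-in types modelling the branch structure of the cleanFile examples.
interface SnapshotAware {
  boolean isCurrentFileDeleted();
  void recordModification(int priorSnapshotId); // save current state into the prior snapshot's diff
  void deleteCurrentFile();
  void deleteSnapshotDiff(int snapshotId, int priorSnapshotId);
}

class CleanFileSketch {
  // Stand-in sentinel playing the role of Snapshot.CURRENT_STATE_ID.
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;

  static void clean(SnapshotAware file, int snapshotId, int priorSnapshotId) {
    if (snapshotId == CURRENT_STATE_ID) {
      // Deleting the live file while snapshots still reference it:
      // record its state against the prior snapshot, then mark it deleted.
      if (!file.isCurrentFileDeleted()) {
        file.recordModification(priorSnapshotId);
        file.deleteCurrentFile();
      }
    } else {
      // Deleting a snapshot: drop that snapshot's diff instead of the file.
      file.deleteSnapshotDiff(snapshotId, priorSnapshotId);
    }
  }
}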
Example 1: cleanFile
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the package/class the method depends on
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Example 2: cleanFile
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the package/class the method depends on
public void cleanFile(INode.ReclaimContext reclaimContext,
    final INodeFile file, final int snapshotId, int priorSnapshotId,
    byte storagePolicyId) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    final BlockStoragePolicy policy = reclaimContext.storagePolicySuite()
        .getPolicy(storagePolicyId);
    QuotaCounts old = file.storagespaceConsumed(policy);
    collectBlocksAndClear(reclaimContext, file);
    QuotaCounts current = file.storagespaceConsumed(policy);
    reclaimContext.quotaDelta().add(old.subtract(current));
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    diffs.deleteSnapshotDiff(reclaimContext, snapshotId, priorSnapshotId,
        file);
  }
}
Example 3: cleanFile
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the package/class the method depends on
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(file, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
Example 4: cleanFile
import org.apache.hadoop.hdfs.server.namenode.INodeFile; // import the package/class the method depends on
public Quota.Counts cleanFile(final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final boolean countDiffChange)
    throws QuotaExceededException {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(file, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes, countDiffChange);
  }
}
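To see which branch of cleanFile a client operation actually exercises, the sketch below drives a MiniDFSCluster (from the hadoop-hdfs test artifact) through a snapshot workflow: take a snapshot of a directory, delete the live file, then delete the snapshot itself. This is an illustrative sketch rather than code from the examples above; the paths and snapshot name (/snapDir, s0) are made up for the example.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class SnapshotDeleteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();

      Path dir = new Path("/snapDir");
      Path file = new Path(dir, "file");
      fs.mkdirs(dir);
      try (FSDataOutputStream out = fs.create(file)) {
        out.writeBytes("hello");
      }

      // Enable snapshots on the directory and take one; the file is now
      // referenced from snapshot "s0".
      fs.allowSnapshot(dir);
      fs.createSnapshot(dir, "s0");

      // Deleting the current file while "s0" still references it takes the
      // snapshotId == Snapshot.CURRENT_STATE_ID branch shown above: the state
      // is recorded against the prior snapshot and the live file is marked deleted.
      fs.delete(file, false);

      // Deleting the snapshot afterwards takes the other branch, removing the
      // corresponding snapshot diff and allowing the blocks to be reclaimed.
      fs.deleteSnapshot(dir, "s0");
    } finally {
      cluster.shutdown();
    }
  }
}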