本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.INodeFile.computeFileSize方法的典型用法代码示例。如果您正苦于以下问题:Java INodeFile.computeFileSize方法的具体用法?Java INodeFile.computeFileSize怎么用?Java INodeFile.computeFileSize使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.server.namenode.INodeFile
的用法示例。
在下文中一共展示了INodeFile.computeFileSize方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: collectBlocksAndClear
import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
* If some blocks at the end of the block list no longer belongs to
* any inode, collect them and update the block list.
*/
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // Nothing references the file any more: destroy it and collect all blocks.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // Largest size still needed, either by the newest diff (file deleted)
  // or by the current file itself.
  final FileDiff lastDiff = getDiffs().getLast();
  final long maxSize;
  if (isCurrentFileDeleted()) {
    maxSize = lastDiff == null ? 0 : lastDiff.getFileSize();
  } else {
    maxSize = file.computeFileSize();
  }
  // Collect blocks that should be deleted: either those beyond the retained
  // size, or those beyond the last snapshot's recorded block list.
  final FileDiff newest = diffs.getLast();
  final BlockInfoContiguous[] snapshotBlocks =
      newest == null ? null : newest.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(maxSize, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
示例2: collectBlocksAndClear
import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
* If some blocks at the end of the block list no longer belongs to
* any inode, collect them and update the block list.
*/
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(
    INode.ReclaimContext reclaimContext, final INodeFile file) {
  // Nothing references the file any more: clear it entirely.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.clearFile(reclaimContext);
    return;
  }
  // Largest size still needed, either by the newest diff (file deleted)
  // or by the current file itself.
  final FileDiff lastDiff = getDiffs().getLast();
  final long maxSize;
  if (isCurrentFileDeleted()) {
    maxSize = lastDiff == null ? 0 : lastDiff.getFileSize();
  } else {
    maxSize = file.computeFileSize();
  }
  // Collect blocks that should be deleted: either those beyond the retained
  // size, or those beyond the last snapshot's recorded block list.
  final FileDiff newest = diffs.getLast();
  final BlockInfo[] snapshotBlocks = newest == null ? null : newest.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(maxSize, reclaimContext.collectedBlocks());
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks,
        reclaimContext.collectedBlocks());
  }
}
示例3: updateQuotaAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Remove the given file diff, collect its blocks, and compute the
 * resulting disk-space quota delta.
 */
public Quota.Counts updateQuotaAndCollectBlocks(INodeFile file,
    FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  // Disk space charged before the diff is removed.
  long oldDiskspace = file.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    final short snapshotRepl = removed.snapshotINode.getFileReplication();
    final short currentRepl = file.getBlockReplication();
    if (currentRepl == 0) {
      // No current replication: recompute usage from the full file size
      // at the snapshot's replication factor.
      oldDiskspace = file.computeFileSize(true, true) * snapshotRepl;
    } else if (snapshotRepl > currentRepl) {
      // Snapshot pinned a higher replication: scale the old usage up.
      oldDiskspace = oldDiskspace / file.getBlockReplication() * snapshotRepl;
    }
  }
  collectBlocksAndClear(file, collectedBlocks, removedINodes);
  // Quota delta is the space freed by removing the diff.
  final long dsDelta = oldDiskspace - file.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
示例4: collectBlocksAndClear
import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
* If some blocks at the end of the block list no longer belongs to
* any inode, collect them and update the block list.
*/
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
private void collectBlocksAndClear(final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // Nothing references the file any more: destroy it and collect all blocks.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(info, removedINodes);
    return;
  }
  // Largest size still needed, either by the newest diff (file deleted)
  // or by the current file itself.
  final long maxSize;
  if (isCurrentFileDeleted()) {
    final FileDiff lastDiff = getDiffs().getLast();
    maxSize = lastDiff == null ? 0 : lastDiff.getFileSize();
  } else {
    maxSize = file.computeFileSize();
  }
  collectBlocksBeyondMax(file, maxSize, info);
}
示例5: updateQuotaAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Remove the given file diff, collect its blocks, and compute the
 * resulting disk-space quota delta.
 */
private static Quota.Counts updateQuotaAndCollectBlocks(
    INodeFile currentINode, FileDiff removed,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  final FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
  // Disk space charged before the diff is removed.
  long oldDiskspace = currentINode.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    final short snapshotRepl = removed.snapshotINode.getFileReplication();
    final short currentRepl = currentINode.getBlockReplication();
    if (currentRepl == 0) {
      // No current replication: recompute usage from the full file size
      // at the snapshot's replication factor.
      oldDiskspace = currentINode.computeFileSize(true, true) * snapshotRepl;
    } else if (snapshotRepl > currentRepl) {
      // Snapshot pinned a higher replication: scale the old usage up.
      oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
          * snapshotRepl;
    }
  }
  Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);
  // Quota delta is the space freed by removing the diff.
  final long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
示例6: changedBetweenSnapshots
import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Whether the file changed (in length or metadata) between the two
 * given snapshots.
 */
boolean changedBetweenSnapshots(INodeFile file, Snapshot from, Snapshot to) {
  // Indices of the diffs recorded right after each snapshot; null means the
  // two snapshots see an identical diff state, so nothing changed.
  int[] diffIndexPair = diffs.changedBetweenSnapshots(from, to);
  if (diffIndexPair == null) {
    return false;
  }
  int earlierDiffIndex = diffIndexPair[0];
  int laterDiffIndex = diffIndexPair[1];
  final List<FileDiff> diffList = diffs.asList();
  // Compare file lengths first; laterDiffIndex == size() is the sentinel for
  // "beyond the last diff", i.e. use the current file's size.
  final long earlierLength = diffList.get(earlierDiffIndex).getFileSize();
  final long laterLength = laterDiffIndex == diffList.size() ? file
      .computeFileSize(true, false) : diffList.get(laterDiffIndex)
      .getFileSize();
  if (earlierLength != laterLength) { // file length has been changed
    return true;
  }
  // Lengths are equal: fall back to comparing the inode metadata. Scan the
  // half-open range [earlierDiffIndex, laterDiffIndex) for the first diff
  // that captured a snapshot copy of the inode attributes.
  INodeFileAttributes earlierAttr = null; // check the metadata
  for (int i = earlierDiffIndex; i < laterDiffIndex; i++) {
    FileDiff diff = diffList.get(i);
    if (diff.snapshotINode != null) {
      earlierAttr = diff.snapshotINode;
      break;
    }
  }
  if (earlierAttr == null) { // no meta-change at all, return false
    return false;
  }
  // Attributes as seen by the later of the two snapshots.
  INodeFileAttributes laterAttr = diffs.getSnapshotINode(
      Math.max(Snapshot.getSnapshotId(from), Snapshot.getSnapshotId(to)),
      file);
  return !earlierAttr.metadataEquals(laterAttr);
}
示例7: FileDiff
import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Records the state of the file at the time the snapshot is taken:
 * captures the current file size; the block list is captured lazily
 * (starts as null).
 */
FileDiff(int snapshotId, INodeFile file) {
  super(snapshotId, null, null);
  // Snapshot the file length at creation time.
  fileSize = file.computeFileSize();
  blocks = null;
}
示例8: FileDiff
import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Records the state of the file at the time the snapshot is taken:
 * captures the current file size.
 */
FileDiff(int snapshotId, INodeFile file) {
  super(snapshotId, null, null);
  // Snapshot the file length at creation time.
  fileSize = file.computeFileSize();
}
示例9: FileDiff
import org.apache.hadoop.hdfs.server.namenode.INodeFile; //导入方法依赖的package包/类
/**
 * Records the state of the file at the time the snapshot is taken:
 * captures the current file size.
 */
private FileDiff(Snapshot snapshot, INodeFile file) {
  super(snapshot, null, null);
  // Snapshot the file length at creation time.
  fileSize = file.computeFileSize();
}