This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.Quota.Counts. If you are wondering what Quota.Counts does, how to use it, or where to find usage examples, the curated code samples below should help. You can also read further about the enclosing class, org.apache.hadoop.hdfs.server.namenode.Quota.
Fifteen code examples of the Quota.Counts method are shown below, sorted by popularity by default.
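Before the examples, here is a minimal sketch of the Quota.Counts calls they all rely on (newInstance, add, get, and the two-argument newInstance(namespace, diskspace) factory, all visible in the snippets below); the wrapper class, main method, and literal values are illustrative only.

import org.apache.hadoop.hdfs.server.namenode.Quota;

public class QuotaCountsSketch {
  public static void main(String[] args) {
    // Start from an empty counter (namespace = 0, diskspace = 0).
    Quota.Counts counts = Quota.Counts.newInstance();

    // Accumulate usage: one inode in the namespace, 3 KB on disk.
    counts.add(Quota.NAMESPACE, 1);
    counts.add(Quota.DISKSPACE, 3 * 1024);

    // Read the totals back per quota type.
    System.out.println("ns = " + counts.get(Quota.NAMESPACE));
    System.out.println("ds = " + counts.get(Quota.DISKSPACE));

    // Deltas can also be created directly, as several examples below
    // do when returning freed or consumed space.
    Quota.Counts delta = Quota.Counts.newInstance(0, -3 * 1024);
    System.out.println("ds delta = " + delta.get(Quota.DISKSPACE));
  }
}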
Example 1: combinePosteriorAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
@Override
Quota.Counts combinePosteriorAndCollectBlocks(
    final INodeDirectory currentDir, final DirectoryDiff posterior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final Quota.Counts counts = Quota.Counts.newInstance();
  diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
    /** Collect blocks for deleted files. */
    @Override
    public void process(INode inode) {
      if (inode != null) {
        inode.computeQuotaUsage(counts, false);
        inode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
      }
    }
  });
  return counts;
}
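The pattern here recurs throughout these examples: a fresh Quota.Counts is threaded through computeQuotaUsage as an accumulator while the inodes are destroyed, and the caller then releases that amount from the parent's quota. A hedged sketch of such a caller (the parent variable and the negation are an assumption modeled on Examples 7 and 10, not part of this snippet):

// Hypothetical caller: release the quota the destroyed diff was holding.
Quota.Counts freed = combinePosteriorAndCollectBlocks(
    currentDir, posterior, collectedBlocks, removedINodes);
parent.addSpaceConsumed(-freed.get(Quota.NAMESPACE),
    -freed.get(Quota.DISKSPACE), true);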
Example 2: updateQuotaAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
public Quota.Counts updateQuotaAndCollectBlocks(INodeFile file,
    FileDiff removed, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  long oldDiskspace = file.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = file.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = file.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {
      oldDiskspace = oldDiskspace / file.getBlockReplication() * replication;
    }
  }
  collectBlocksAndClear(file, collectedBlocks, removedINodes);
  long dsDelta = oldDiskspace - file.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
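The replication rescaling above is easy to misread, so here is a small worked example with illustrative numbers (none of these values come from the source): when the snapshot recorded a higher replication than the file currently has, the pre-delete consumption is scaled up before the delta is computed.

// Illustrative values: a 1 MB file, current replication 2, snapshot
// replication 3.
long fileSize = 1024L * 1024L;
short currentRepl = 2;
short snapshotRepl = 3;

long oldDiskspace = fileSize * currentRepl;               // 2 MB charged now
// snapshotRepl > currentRepl, so rescale to the snapshot's replication:
oldDiskspace = oldDiskspace / currentRepl * snapshotRepl; // 3 MB
// After the diff is dropped the file is charged at currentRepl again,
// so the diskspace delta handed back to the caller would be 1 MB:
long dsDelta = oldDiskspace - fileSize * currentRepl;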
Example 3: computeQuotaUsage
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
@Override
public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
    boolean useCache, int lastSnapshotId) {
  if ((useCache && isQuotaSet()) || lastSnapshotId == Snapshot.INVALID_ID) {
    return super.computeQuotaUsage(counts, useCache, lastSnapshotId);
  }
  Snapshot lastSnapshot = diffs.getSnapshotById(lastSnapshotId);
  ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
  for (INode child : childrenList) {
    child.computeQuotaUsage(counts, useCache, lastSnapshotId);
  }
  counts.add(Quota.NAMESPACE, 1);
  return counts;
}
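A sketch of a typical call site, under the assumption that dir is an INodeDirectory carrying this override; the flag values mirror the ones used in Examples 1 and 15.

// Hypothetical caller: compute usage without trusting cached quota
// counts, up to the current (non-snapshot) state.
Quota.Counts counts = Quota.Counts.newInstance();
dir.computeQuotaUsage(counts, false, Snapshot.CURRENT_STATE_ID);
long ns = counts.get(Quota.NAMESPACE);
long ds = counts.get(Quota.DISKSPACE);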
Example 4: updateQuotaAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
private static Quota.Counts updateQuotaAndCollectBlocks(
    INodeFile currentINode, FileDiff removed,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
  long oldDiskspace = currentINode.diskspaceConsumed();
  if (removed.snapshotINode != null) {
    short replication = removed.snapshotINode.getFileReplication();
    short currentRepl = currentINode.getBlockReplication();
    if (currentRepl == 0) {
      oldDiskspace = currentINode.computeFileSize(true, true) * replication;
    } else if (replication > currentRepl) {
      oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
          * replication;
    }
  }
  Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);
  long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
  return Quota.Counts.newInstance(0, dsDelta);
}
Example 5: cleanSubtree
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
@Override
public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final boolean countDiffChange)
    throws QuotaExceededException {
  if (snapshot == null) { // delete the current file
    recordModification(prior, null);
    isCurrentFileDeleted = true;
    Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
    return Quota.Counts.newInstance();
  } else { // delete a snapshot
    prior = getDiffs().updatePrior(snapshot, prior);
    return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks,
        removedINodes, countDiffChange);
  }
}
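Note the convention: passing snapshot == null means "delete the file from the current tree", while a non-null snapshot deletes only that snapshot's diff. A sketch of the two call shapes (the local variable names are hypothetical):

// (1) Remove the file itself from the current tree:
file.cleanSubtree(null, prior, collectedBlocks, removedINodes, true);
// (2) Remove one snapshot while the current file lives on:
file.cleanSubtree(snapshotToDelete, prior, collectedBlocks, removedINodes, true);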
Example 6: destroyCreatedList
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/** Clear the created list. */
private Quota.Counts destroyCreatedList(
    final INodeDirectoryWithSnapshot currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  Quota.Counts counts = Quota.Counts.newInstance();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    c.computeQuotaUsage(counts, true);
    c.destroyAndCollectBlocks(collectedBlocks, removedINodes);
    // c should still be in the children list; remove it there too
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}
Example 7: undoRename4DstParent
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/**
 * Undo the rename operation for the dst tree, i.e., if the rename operation
 * (with OVERWRITE option) removes a file/dir from the dst tree, add it back
 * and delete the possible record in the deleted list.
 */
public void undoRename4DstParent(final INode deletedChild,
    Snapshot latestSnapshot) throws QuotaExceededException {
  boolean removeDeletedChild = diffs.removeChild(ListType.DELETED,
      deletedChild);
  // pass null for inodeMap since the parent node will not get replaced when
  // undoing rename
  final boolean added = addChild(deletedChild, true, removeDeletedChild ? null
      : latestSnapshot, null);
  // update quota usage if the addition succeeds and the old child was not
  // already stored in the deleted list
  if (added && !removeDeletedChild) {
    final Quota.Counts counts = deletedChild.computeQuotaUsage();
    addSpaceConsumed(counts.get(Quota.NAMESPACE),
        counts.get(Quota.DISKSPACE), false);
  }
}
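For reference, addSpaceConsumed here takes the namespace delta, the diskspace delta, and a verify flag; passing false reads as deliberate, since an undo should not itself fail on quota (an interpretation of the call, not stated in this snippet). A minimal sketch of the same pattern with hypothetical variable names:

// Hypothetical mirror of the call above: re-charge a restored child
// without quota verification, so the undo cannot throw.
Quota.Counts counts = restoredChild.computeQuotaUsage();
parent.addSpaceConsumed(counts.get(Quota.NAMESPACE),
    counts.get(Quota.DISKSPACE), false);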
Example 8: combinePosteriorAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
@Override
Quota.Counts combinePosteriorAndCollectBlocks(INodeFile currentINode,
    FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  return currentINode.getFileWithSnapshotFeature()
      .updateQuotaAndCollectBlocks(currentINode, posterior, collectedBlocks,
          removedINodes);
}
Example 9: destroyDiffAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
@Override
Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  return currentINode.getFileWithSnapshotFeature()
      .updateQuotaAndCollectBlocks(currentINode, this, collectedBlocks,
          removedINodes);
}
Example 10: removeSnapshot
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/**
 * Remove the snapshot with the given name from {@link #snapshotsByNames},
 * and delete all the corresponding DirectoryDiffs.
 *
 * @param snapshotRoot The directory where we take snapshots
 * @param snapshotName The name of the snapshot to be removed
 * @param collectedBlocks Used to collect information to update blocksMap
 * @return The removed snapshot. Null if no snapshot with the given name
 *         exists.
 */
public Snapshot removeSnapshot(INodeDirectory snapshotRoot,
    String snapshotName, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws SnapshotException {
  final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
  if (i < 0) {
    throw new SnapshotException("Cannot delete snapshot " + snapshotName
        + " from path " + snapshotRoot.getFullPathName()
        + ": the snapshot does not exist.");
  } else {
    final Snapshot snapshot = snapshotsByNames.get(i);
    int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
    try {
      Quota.Counts counts = snapshotRoot.cleanSubtree(snapshot.getId(),
          prior, collectedBlocks, removedINodes);
      INodeDirectory parent = snapshotRoot.getParent();
      if (parent != null) {
        // there will not be any WithName node corresponding to the deleted
        // snapshot, thus only update the quota usage in the current tree
        parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
            -counts.get(Quota.DISKSPACE), true);
      }
    } catch (QuotaExceededException e) {
      INode.LOG.error("BUG: removeSnapshot increases namespace usage.", e);
    }
    // remove from snapshotsByNames after successfully cleaning the subtree
    snapshotsByNames.remove(i);
    return snapshot;
  }
}
Example 11: destroyCreatedList
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/** Clear the created list. */
private Quota.Counts destroyCreatedList(final INodeDirectory currentINode,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  Quota.Counts counts = Quota.Counts.newInstance();
  final List<INode> createdList = getList(ListType.CREATED);
  for (INode c : createdList) {
    c.computeQuotaUsage(counts, true);
    c.destroyAndCollectBlocks(collectedBlocks, removedINodes);
    // c should still be in the children list; remove it there too
    currentINode.removeChild(c);
  }
  createdList.clear();
  return counts;
}
Example 12: testRenameExceedQuota
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/**
 * Test the rename undo when the quota of the dst tree is exceeded after the
 * rename.
 */
@Test
public void testRenameExceedQuota() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subfile_dir2 = new Path(sub_dir2, "subfile");
  hdfs.mkdirs(dir1);
  DFSTestUtil.createFile(hdfs, subfile_dir2, BLOCKSIZE, REPL, SEED);
  final Path foo = new Path(dir1, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  // set the ns quota of dir2 to 5, so the current remaining is 1 (it already
  // holds dir2, sub_dir2, subfile_dir2, and s2)
  hdfs.setQuota(dir2, 5, Long.MAX_VALUE - 1);
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subfile_dir2.
  // FSDirectory#verifyQuota4Rename will pass since foo is only counted
  // as 1 against the NS quota. The rename succeeds, while the real usage
  // of dir2 becomes 7 (dir2, s2 in dir2, sub_dir2, s2 in sub_dir2,
  // subfile_dir2 in the deleted list, the new subfile, and s1 in the new
  // subfile).
  hdfs.rename(foo, subfile_dir2, Rename.OVERWRITE);
  // check dir2
  INode dir2Node = fsdir.getINode4Write(dir2.toString());
  assertTrue(dir2Node.asDirectory().isSnapshottable());
  Quota.Counts counts = dir2Node.computeQuotaUsage();
  assertEquals(7, counts.get(Quota.NAMESPACE));
  assertEquals(BLOCKSIZE * REPL * 2, counts.get(Quota.DISKSPACE));
}
Example 13: testRenameExceedQuota
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/**
 * Test the rename undo when the quota of the dst tree is exceeded after the
 * rename.
 */
@Test
public void testRenameExceedQuota() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subfile_dir2 = new Path(sub_dir2, "subfile");
  hdfs.mkdirs(dir1);
  DFSTestUtil.createFile(hdfs, subfile_dir2, BLOCKSIZE, REPL, SEED);
  final Path foo = new Path(dir1, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  // set the ns quota of dir2 to 5, so the current remaining is 1 (it already
  // holds dir2, sub_dir2, subfile_dir2, and s2)
  hdfs.setQuota(dir2, 5, Long.MAX_VALUE - 1);
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subfile_dir2.
  // FSDirectory#verifyQuota4Rename will pass since foo is only counted
  // as 1 against the NS quota. The rename succeeds, while the real usage
  // of dir2 becomes 7 (dir2, s2 in dir2, sub_dir2, s2 in sub_dir2,
  // subfile_dir2 in the deleted list, the new subfile, and s1 in the new
  // subfile).
  hdfs.rename(foo, subfile_dir2, Rename.OVERWRITE);
  // check dir2
  INode dir2Node = fsdir.getINode4Write(dir2.toString());
  assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
  Quota.Counts counts = dir2Node.computeQuotaUsage();
  assertEquals(7, counts.get(Quota.NAMESPACE));
  assertEquals(BLOCKSIZE * REPL * 2, counts.get(Quota.DISKSPACE));
}
Example 14: removeSnapshot
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
/**
 * Remove the snapshot with the given name from {@link #snapshotsByNames},
 * and delete all the corresponding DirectoryDiffs.
 *
 * @param snapshotName The name of the snapshot to be removed
 * @param collectedBlocks Used to collect information to update blocksMap
 * @return The removed snapshot. Null if no snapshot with the given name
 *         exists.
 */
Snapshot removeSnapshot(String snapshotName,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
    throws SnapshotException {
  final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
  if (i < 0) {
    throw new SnapshotException("Cannot delete snapshot " + snapshotName
        + " from path " + this.getFullPathName()
        + ": the snapshot does not exist.");
  } else {
    final Snapshot snapshot = snapshotsByNames.get(i);
    Snapshot prior = Snapshot.findLatestSnapshot(this, snapshot);
    try {
      Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks,
          removedINodes, true);
      INodeDirectory parent = getParent();
      if (parent != null) {
        // there will not be any WithName node corresponding to the deleted
        // snapshot, thus only update the quota usage in the current tree
        parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
            -counts.get(Quota.DISKSPACE), true);
      }
    } catch (QuotaExceededException e) {
      LOG.error("BUG: removeSnapshot increases namespace usage.", e);
    }
    // remove from snapshotsByNames after successfully cleaning the subtree
    snapshotsByNames.remove(i);
    return snapshot;
  }
}
Example 15: computeQuotaUsage4CurrentDirectory
import org.apache.hadoop.hdfs.server.namenode.Quota; // import the package/class the method depends on
public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts) {
  for (DirectoryDiff d : diffs) {
    for (INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
      deleted.computeQuotaUsage(counts, false, Snapshot.CURRENT_STATE_ID);
    }
  }
  counts.add(Quota.NAMESPACE, diffs.asList().size());
  return counts;
}
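To make the accounting above concrete, a small illustrative calculation (none of these numbers come from the source): every inode on a diff's DELETED list contributes its own usage, and each retained DirectoryDiff is charged one extra NAMESPACE unit.

// Illustrative only: 3 retained DirectoryDiffs whose DELETED lists hold
// 2 plain-file inodes in total.
Quota.Counts counts = Quota.Counts.newInstance();
counts.add(Quota.NAMESPACE, 2); // stand-in for the per-inode recursion
counts.add(Quota.NAMESPACE, 3); // one unit per retained DirectoryDiff
assert counts.get(Quota.NAMESPACE) == 5;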