This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.INodeDirectory. If you are wondering what INodeDirectory is for and how to use it, the hand-picked code examples below should help.
The INodeDirectory class belongs to the org.apache.hadoop.hdfs.server.namenode package. 15 code examples of the class are shown below, sorted by popularity by default.
Example 1: checkNestedSnapshottable
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
private void checkNestedSnapshottable(INodeDirectory dir, String path)
throws SnapshotException {
if (allowNestedSnapshots) {
return;
}
for(INodeDirectory s : snapshottables.values()) {
if (s.isAncestorDirectory(dir)) {
throw new SnapshotException(
"Nested snapshottable directories not allowed: path=" + path
+ ", the subdirectory " + s.getFullPathName()
+ " is already a snapshottable directory.");
}
if (dir.isAncestorDirectory(s)) {
throw new SnapshotException(
"Nested snapshottable directories not allowed: path=" + path
+ ", the ancestor " + s.getFullPathName()
+ " is already a snapshottable directory.");
}
}
}
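This check is what a client runs into when trying to nest snapshottable directories. Below is a minimal sketch (not from the source above) of triggering it through the public client API, assuming an already-initialized DistributedFileSystem handle and hypothetical paths; with nested snapshots disabled (the default), the second allowSnapshot call should come back with the SnapshotException built here.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: allowing snapshots on both an ancestor and its descendant
// is rejected when nested snapshots are disabled.
static void demoNestedCheck(DistributedFileSystem dfs) throws IOException {
  Path parent = new Path("/data");      // hypothetical path
  Path child  = new Path("/data/sub");  // hypothetical path
  dfs.allowSnapshot(parent);
  try {
    dfs.allowSnapshot(child);           // expected to fail
  } catch (IOException e) {
    System.out.println("rejected: " + e.getMessage());
  }
}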
Example 2: setSnapshottable
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/**
* Set the given directory as a snapshottable directory.
* If the path is already a snapshottable directory, update the quota.
*/
public void setSnapshottable(final String path, boolean checkNestedSnapshottable)
throws IOException {
final INodesInPath iip = fsdir.getINodesInPath4Write(path);
final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
if (checkNestedSnapshottable) {
checkNestedSnapshottable(d, path);
}
if (d.isSnapshottable()) {
//The directory is already a snapshottable directory.
d.setSnapshotQuota(DirectorySnapshottableFeature.SNAPSHOT_LIMIT);
} else {
d.addSnapshottableFeature();
}
addSnapshottable(d);
}
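In practice setSnapshottable is driven by the admin command `hdfs dfsadmin -allowSnapshot <path>` or the equivalent client call. A minimal usage sketch, assuming a client Configuration that points at the cluster and a hypothetical /data path:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class AllowSnapshotDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Mark /data as snapshottable from the client side.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    dfs.allowSnapshot(new Path("/data"));  // hypothetical path
  }
}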
Example 3: resetSnapshottable
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/**
* Set the given snapshottable directory to non-snapshottable.
*
* @throws SnapshotException if there are snapshots in the directory.
*/
public void resetSnapshottable(final String path) throws IOException {
final INodesInPath iip = fsdir.getINodesInPath4Write(path);
final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
DirectorySnapshottableFeature sf = d.getDirectorySnapshottableFeature();
if (sf == null) {
// the directory is already non-snapshottable
return;
}
if (sf.getNumSnapshots() > 0) {
throw new SnapshotException("The directory " + path + " has snapshot(s). "
+ "Please redo the operation after removing all the snapshots.");
}
if (d == fsdir.getRoot()) {
d.setSnapshotQuota(0);
} else {
d.removeSnapshottableFeature();
}
removeSnapshottable(d);
}
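The inverse operation is reached through `hdfs dfsadmin -disallowSnapshot <path>`. Since the method above refuses to proceed while snapshots remain, a sketch of the client-side sequence, with a hypothetical path and snapshot name:
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: all snapshots must be deleted before the directory can be
// made non-snapshottable, otherwise a SnapshotException comes back.
static void demoDisallow(DistributedFileSystem dfs) throws IOException {
  Path dir = new Path("/data");    // hypothetical path
  dfs.deleteSnapshot(dir, "s1");   // hypothetical snapshot name
  dfs.disallowSnapshot(dir);
}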
Example 4: createSnapshot
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/**
 * Create a snapshot of the given path.
 * It is assumed that the caller will perform synchronization.
 *
 * @param iip the INodes resolved from the snapshottable directory's path
 * @param snapshotRoot the path of the snapshottable directory
 * @param snapshotName the name of the snapshot
 * @return the full path of the newly created snapshot
 * @throws IOException when 1) the given path does not lead to an
 *           existing snapshottable directory, 2) a snapshot with the
 *           given name already exists for the directory, or 3) the
 *           snapshot count would exceed the quota
 */
public String createSnapshot(final INodesInPath iip, String snapshotRoot,
String snapshotName) throws IOException {
INodeDirectory srcRoot = getSnapshottableRoot(iip);
if (snapshotCounter == getMaxSnapshotID()) {
// We have reached the maximum allowable snapshot ID and since we don't
// handle rollover we will fail all subsequent snapshot creation
// requests.
//
throw new SnapshotException(
"Failed to create the snapshot. The FileSystem has run out of " +
"snapshot IDs and ID rollover is not supported.");
}
srcRoot.addSnapshot(snapshotCounter, snapshotName);
// creation succeeded, update the snapshot id
snapshotCounter++;
numSnapshots.getAndIncrement();
return Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
}
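Clients reach this through FileSystem#createSnapshot (or `hdfs dfs -createSnapshot`). A minimal sketch with a hypothetical path and name; the returned path points under the directory's .snapshot folder:
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: create snapshot "s1" of /data; the result is the snapshot
// path, e.g. /data/.snapshot/s1.
static Path demoCreate(DistributedFileSystem dfs) throws IOException {
  return dfs.createSnapshot(new Path("/data"), "s1");  // hypothetical names
}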
Example 5: diff
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/**
* Compute the difference between two snapshots of a directory, or between a
* snapshot of the directory and its current tree.
*/
public SnapshotDiffReport diff(final INodesInPath iip,
final String snapshotRootPath, final String from,
final String to) throws IOException {
// Find the source root directory where the snapshots were taken.
// All the path checks are performed inside the valueOf method.
final INodeDirectory snapshotRoot = getSnapshottableRoot(iip);
if ((from == null || from.isEmpty())
&& (to == null || to.isEmpty())) {
// both fromSnapshot and toSnapshot indicate the current tree
return new SnapshotDiffReport(snapshotRootPath, from, to,
Collections.<DiffReportEntry> emptyList());
}
final SnapshotDiffInfo diffs = snapshotRoot
.getDirectorySnapshottableFeature().computeDiff(snapshotRoot, from, to);
return diffs != null ? diffs.generateReport() : new SnapshotDiffReport(
snapshotRootPath, from, to, Collections.<DiffReportEntry> emptyList());
}
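From a client, the same diff is requested with DistributedFileSystem#getSnapshotDiffReport; passing an empty string selects the current tree, matching the null/empty handling above. A minimal sketch with hypothetical names:
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

// Sketch: diff snapshot s1 against the current tree of /data.
static void demoDiff(DistributedFileSystem dfs) throws IOException {
  SnapshotDiffReport report =
      dfs.getSnapshotDiffReport(new Path("/data"), "s1", "");
  System.out.println(report);  // one line per create/delete/modify/rename entry
}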
Example 6: Root
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
Root(INodeDirectory other) {
// Always preserve ACL, XAttr.
super(other, false, Lists.newArrayList(
Iterables.filter(Arrays.asList(other.getFeatures()), new Predicate<Feature>() {
@Override
public boolean apply(Feature input) {
return AclFeature.class.isInstance(input)
|| XAttrFeature.class.isInstance(input);
}
}))
.toArray(new Feature[0]));
}
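The anonymous Guava Predicate keeps only the ACL and XAttr features of the copied directory. On Java 8+ the same selection can be written with streams; a sketch of the equivalent expression, assuming the same Feature types are in scope:
import java.util.Arrays;

// Equivalent to the Guava filter above: keep only AclFeature and
// XAttrFeature entries from the source directory's feature array.
Feature[] kept = Arrays.stream(other.getFeatures())
    .filter(f -> f instanceof AclFeature || f instanceof XAttrFeature)
    .toArray(Feature[]::new);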
Example 7: loadCreated
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/**
* Load a node stored in the created list from fsimage.
* @param createdNodeName The name of the created node.
* @param parent The directory that the created list belongs to.
* @return The created node.
*/
public static INode loadCreated(byte[] createdNodeName,
INodeDirectory parent) throws IOException {
// the INode in the created list should be a reference to another INode
// in posterior SnapshotDiffs or one of the current children
for (DirectoryDiff postDiff : parent.getDiffs()) {
final INode d = postDiff.getChildrenDiff().search(ListType.DELETED,
createdNodeName);
if (d != null) {
return d;
} // else go to the next SnapshotDiff
}
// use the current child
INode currentChild = parent.getChild(createdNodeName,
Snapshot.CURRENT_STATE_ID);
if (currentChild == null) {
throw new IOException("Cannot find an INode associated with the INode "
+ DFSUtil.bytes2String(createdNodeName)
+ " in created list while loading FSImage.");
}
return currentChild;
}
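The search order is the point here: an entry found on any posterior diff's deleted list takes precedence over the directory's current children. A self-contained toy sketch of that pattern (simplified types, not the Hadoop classes):
import java.util.List;
import java.util.Map;

// Toy model: scan the deleted maps of posterior diffs first and only
// fall back to the live children map when no diff knows the name.
static String resolveCreated(String name,
    List<Map<String, String>> posteriorDeleted,
    Map<String, String> currentChildren) {
  for (Map<String, String> deleted : posteriorDeleted) {
    String hit = deleted.get(name);
    if (hit != null) {
      return hit;  // found in a posterior diff's deleted list
    }
  }
  return currentChildren.get(name);  // else use the current child
}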
Example 8: loadDeletedList
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/**
* Load the deleted list from the fsimage.
*
* @param parent The directory that the deleted list belongs to.
* @param createdList The created list associated with the deleted list in
* the same Diff.
* @param in The {@link DataInput} to read.
* @param loader The {@link Loader} instance.
* @return The deleted list.
*/
private static List<INode> loadDeletedList(INodeDirectory parent,
List<INode> createdList, DataInput in, FSImageFormat.Loader loader)
throws IOException {
int deletedSize = in.readInt();
List<INode> deletedList = new ArrayList<INode>(deletedSize);
for (int i = 0; i < deletedSize; i++) {
final INode deleted = loader.loadINodeWithLocalName(true, in, true);
deletedList.add(deleted);
// set parent: the parent field of an INode in the deleted list is not
// useful, but set the parent here to be consistent with the original
// fsdir tree.
deleted.setParent(parent);
if (deleted.isFile()) {
loader.updateBlocksMap(deleted.asFile());
}
}
return deletedList;
}
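The serialized form read here is a plain length-prefixed list: an int count followed by that many inodes. A minimal self-contained sketch of the same layout with java.io.DataInput, using strings in place of inodes:
import java.io.DataInput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Sketch of the length-prefixed list layout: read the count, then
// read that many entries (readUTF stands in for loading an inode).
static List<String> readLengthPrefixedList(DataInput in) throws IOException {
  int size = in.readInt();
  List<String> list = new ArrayList<>(size);
  for (int i = 0; i < size; i++) {
    list.add(in.readUTF());
  }
  return list;
}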
Example 9: loadSnapshotList
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/**
 * Load snapshots and snapshotQuota for a snapshottable directory.
 *
 * @param snapshottableParent
 *          The snapshottable directory for loading.
 * @param numSnapshots
 *          The number of snapshots that the directory has.
 * @param in
 *          The {@link DataInput} to read.
 * @param loader
 *          The loader.
 */
public static void loadSnapshotList(INodeDirectory snapshottableParent,
int numSnapshots, DataInput in, FSImageFormat.Loader loader)
throws IOException {
DirectorySnapshottableFeature sf = snapshottableParent
.getDirectorySnapshottableFeature();
Preconditions.checkArgument(sf != null);
for (int i = 0; i < numSnapshots; i++) {
// read snapshots
final Snapshot s = loader.getSnapshot(in);
s.getRoot().setParent(snapshottableParent);
sf.addSnapshot(s);
}
int snapshotQuota = in.readInt();
snapshottableParent.setSnapshotQuota(snapshotQuota);
}
Example 10: loadDirectoryDiff
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/**
* Load {@link DirectoryDiff} from fsimage.
* @param parent The directory that the SnapshotDiff belongs to.
* @param in The {@link DataInput} instance to read.
* @param loader The {@link Loader} instance that this loading procedure is
* using.
* @return A {@link DirectoryDiff}.
*/
private static DirectoryDiff loadDirectoryDiff(INodeDirectory parent,
DataInput in, FSImageFormat.Loader loader) throws IOException {
// 1. Read the full path of the Snapshot root to identify the Snapshot
final Snapshot snapshot = loader.getSnapshot(in);
// 2. Load DirectoryDiff#childrenSize
int childrenSize = in.readInt();
// 3. Load DirectoryDiff#snapshotINode
INodeDirectoryAttributes snapshotINode = loadSnapshotINodeInDirectoryDiff(
snapshot, in, loader);
// 4. Load the created list in SnapshotDiff#Diff
List<INode> createdList = loadCreatedList(parent, in);
// 5. Load the deleted list in SnapshotDiff#Diff
List<INode> deletedList = loadDeletedList(parent, createdList, in, loader);
// 6. Compose the SnapshotDiff
List<DirectoryDiff> diffs = parent.getDiffs().asList();
DirectoryDiff sdiff = new DirectoryDiff(snapshot.getId(), snapshotINode,
diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList,
deletedList, snapshotINode == snapshot.getRoot());
return sdiff;
}
Example 11: destroyCreatedList
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/** clear the created list */
private QuotaCounts destroyCreatedList(
final BlockStoragePolicySuite bsps,
final INodeDirectory currentINode,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes) {
QuotaCounts counts = new QuotaCounts.Builder().build();
final List<INode> createdList = getList(ListType.CREATED);
for (INode c : createdList) {
c.computeQuotaUsage(bsps, counts, true);
c.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
// c should be contained in the children list, remove it
currentINode.removeChild(c);
}
createdList.clear();
return counts;
}
Example 12: combinePosteriorAndCollectBlocks
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
final BlockStoragePolicySuite bsps,
final INodeDirectory currentDir, final DirectoryDiff posterior,
final BlocksMapUpdateInfo collectedBlocks,
final List<INode> removedINodes) {
final QuotaCounts counts = new QuotaCounts.Builder().build();
diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
/** Collect blocks for deleted files. */
@Override
public void process(INode inode) {
if (inode != null) {
inode.computeQuotaUsage(bsps, counts, false);
inode.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
}
}
});
return counts;
}
Example 13: getChild
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/** @return the child with the given name. */
INode getChild(byte[] name, boolean checkPosterior,
INodeDirectory currentDir) {
for(DirectoryDiff d = this; ; d = d.getPosterior()) {
final Container<INode> returned = d.diff.accessPrevious(name);
if (returned != null) {
// the diff is able to determine the inode
return returned.getElement();
} else if (!checkPosterior) {
// Since checkPosterior is false, return null, i.e. not found.
return null;
} else if (d.getPosterior() == null) {
// no more posterior diff, get from current inode.
return currentDir.getChild(name, Snapshot.CURRENT_STATE_ID);
}
}
}
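The loop walks the chain of posterior diffs: the first diff that recorded the name answers, and only when the chain ends does the current directory state decide. A self-contained toy of the chain walk, dropping the checkPosterior short-circuit for brevity:
import java.util.Map;

// Toy model of the posterior-diff chain walked by getChild above.
static final class ToyDiff {
  Map<String, String> recorded;  // names this diff can answer for
  ToyDiff posterior;             // next (newer) diff, or null

  String getChild(String name, Map<String, String> currentDir) {
    for (ToyDiff d = this; ; d = d.posterior) {
      String answer = d.recorded.get(name);
      if (answer != null) {
        return answer;                // this diff determines the child
      } else if (d.posterior == null) {
        return currentDir.get(name);  // fall back to the current state
      }
    }
  }
}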
Example 14: removeChild
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/**
* Remove an inode from parent's children list. The caller of this method
* needs to make sure that parent is in the given snapshot "latest".
*/
public boolean removeChild(INodeDirectory parent, INode child,
int latestSnapshotId) {
// For a directory that is not a renamed node, if isInLatestSnapshot returns
// false, the directory is not in the latest snapshot, thus we do not need
// to record the removed child in any snapshot.
// For a directory that was moved/renamed, note that if the directory is in
// any of the previous snapshots, we will create a reference node for the
// directory while rename, and isInLatestSnapshot will return true in that
// scenario (if all previous snapshots have been deleted, isInLatestSnapshot
// still returns false). Thus if isInLatestSnapshot returns false, the
// directory node cannot be in any snapshot (not in current tree, nor in
// previous src tree). Thus we do not need to record the removed child in
// any snapshot.
ChildrenDiff diff = diffs.checkAndAddLatestSnapshotDiff(latestSnapshotId,
parent).diff;
UndoInfo<INode> undoInfo = diff.delete(child);
final boolean removed = parent.removeChild(child);
if (!removed && undoInfo != null) {
// remove failed, undo
diff.undoDelete(child, undoInfo);
}
return removed;
}
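Note the record-then-undo protocol: the delete is first recorded in the latest snapshot diff, and only if the real removal from the parent fails is that record rolled back. A generic sketch of the same pattern with simplified types:
import java.util.Set;

// Sketch: tentatively record the delete, attempt the real removal,
// and undo the record if the removal did not actually happen.
static boolean removeWithUndo(Set<String> children,
    Set<String> deletedInDiff, String child) {
  boolean recorded = deletedInDiff.add(child);  // tentative record
  boolean removed = children.remove(child);
  if (!removed && recorded) {
    deletedInDiff.remove(child);                // undo the record
  }
  return removed;
}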
Example 15: saveChild2Snapshot
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; // import the required package/class
/** Used to record the modification of a symlink node */
public INode saveChild2Snapshot(INodeDirectory currentINode,
final INode child, final int latestSnapshotId, final INode snapshotCopy) {
Preconditions.checkArgument(!child.isDirectory(),
"child is a directory, child=%s", child);
Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
final DirectoryDiff diff = diffs.checkAndAddLatestSnapshotDiff(
latestSnapshotId, currentINode);
if (diff.getChild(child.getLocalNameBytes(), false, currentINode) != null) {
// it was already saved in the latest snapshot earlier.
return child;
}
diff.diff.modify(snapshotCopy, child);
return child;
}