This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.INodeReference: what the class is for and how it is used in practice. The curated examples below should help answer both questions.
The INodeReference class belongs to the org.apache.hadoop.hdfs.server.namenode package. Ten code examples are shown below, ordered by popularity.
Example 1: writeINodeReferenceWithCount
import org.apache.hadoop.hdfs.server.namenode.INodeReference; // import the required package/class
public void writeINodeReferenceWithCount(
INodeReference.WithCount withCount, DataOutput out,
boolean writeUnderConstruction) throws IOException {
final INode referred = withCount.getReferredINode();
final long id = withCount.getId();
final boolean firstReferred = !referenceMap.containsKey(id);
out.writeBoolean(firstReferred);
if (firstReferred) {
FSImageSerialization.saveINode2Image(referred, out,
writeUnderConstruction, this);
referenceMap.put(id, withCount);
} else {
out.writeLong(id);
}
}
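The write path above is a classic back-reference scheme: the first time a WithCount id is encountered, the full inode is serialized inline and remembered in referenceMap; every later occurrence emits only the id. Below is a minimal standalone sketch of the same idea over plain java.io streams; Node and RefWriter are hypothetical toy types, not Hadoop classes.

import java.io.DataOutput;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

class RefWriter {
    // Hypothetical stand-in for INodeReference.WithCount.
    record Node(long id, String name) {}

    private final Map<Long, Node> referenceMap = new HashMap<>();

    // First occurrence: flag = true, then the full payload.
    // Repeat occurrence: flag = false, then just the id.
    void writeRef(Node node, DataOutput out) throws IOException {
        final boolean firstReferred = !referenceMap.containsKey(node.id());
        out.writeBoolean(firstReferred);
        if (firstReferred) {
            out.writeLong(node.id());   // stands in for saveINode2Image(...)
            out.writeUTF(node.name());
            referenceMap.put(node.id(), node);
        } else {
            out.writeLong(node.id());
        }
    }
}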
Example 2: loadINodeReferenceWithCount
import org.apache.hadoop.hdfs.server.namenode.INodeReference; // import the required package/class
public INodeReference.WithCount loadINodeReferenceWithCount(
boolean isSnapshotINode, DataInput in, FSImageFormat.Loader loader
) throws IOException {
final boolean firstReferred = in.readBoolean();
final INodeReference.WithCount withCount;
if (firstReferred) {
final INode referred = loader.loadINodeWithLocalName(isSnapshotINode,
in, true);
withCount = new INodeReference.WithCount(null, referred);
referenceMap.put(withCount.getId(), withCount);
} else {
final long id = in.readLong();
withCount = referenceMap.get(id);
}
return withCount;
}
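The load side mirrors the writer: the leading boolean decides whether a full payload follows or only an id to resolve against referenceMap. A matching toy decoder for the sketch after Example 1 (all names are again hypothetical):

import java.io.DataInput;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

class RefReader {
    record Node(long id, String name) {}

    private final Map<Long, Node> referenceMap = new HashMap<>();

    Node readRef(DataInput in) throws IOException {
        final boolean firstReferred = in.readBoolean();
        if (firstReferred) {
            // Full payload: decode the node and register it for later lookups.
            Node node = new Node(in.readLong(), in.readUTF());
            referenceMap.put(node.id(), node);
            return node;
        }
        // Back-reference: the node must have been decoded earlier.
        return referenceMap.get(in.readLong());
    }
}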
Example 3: loadINodeReference
import org.apache.hadoop.hdfs.server.namenode.INodeReference; // import the required package/class
private INodeReference loadINodeReference(
INodeReferenceSection.INodeReference r) throws IOException {
long referredId = r.getReferredId();
INode referred = fsDir.getInode(referredId);
WithCount withCount = (WithCount) referred.getParentReference();
if (withCount == null) {
withCount = new INodeReference.WithCount(null, referred);
}
final INodeReference ref;
if (r.hasDstSnapshotId()) { // DstReference
ref = new INodeReference.DstReference(null, withCount,
r.getDstSnapshotId());
} else {
ref = new INodeReference.WithName(null, withCount, r.getName()
.toByteArray(), r.getLastSnapshotId());
}
return ref;
}
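A detail worth noting: the loader first tries to reuse the WithCount already attached to the referred inode via getParentReference(), so all references to one inode share a single wrapper. A compact sketch of that resolve-or-create pattern, with hypothetical stand-in types:

import java.util.HashMap;
import java.util.Map;

class SharedWrapperDemo {
    // Hypothetical stand-ins for INode and INodeReference.WithCount.
    record Inode(long id) {}
    record WithCount(Inode referred) {}

    private final Map<Long, WithCount> wrappers = new HashMap<>();

    // Every reference to the same inode must share one WithCount wrapper,
    // mirroring how loadINodeReference reuses referred.getParentReference().
    WithCount wrapperFor(Inode referred) {
        return wrappers.computeIfAbsent(referred.id(),
            id -> new WithCount(referred));
    }
}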
Example 4: loadSnapshotDiffSection
import org.apache.hadoop.hdfs.server.namenode.INodeReference; // import the required package/class
/**
* Load the snapshot diff section from fsimage.
*/
public void loadSnapshotDiffSection(InputStream in) throws IOException {
final List<INodeReference> refList = parent.getLoaderContext()
.getRefList();
while (true) {
SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
.parseDelimitedFrom(in);
if (entry == null) {
break;
}
long inodeId = entry.getInodeId();
INode inode = fsDir.getInode(inodeId);
SnapshotDiffSection.DiffEntry.Type type = entry.getType();
switch (type) {
case FILEDIFF:
loadFileDiffList(in, inode.asFile(), entry.getNumOfDiff());
break;
case DIRECTORYDIFF:
loadDirectoryDiffList(in, inode.asDirectory(), entry.getNumOfDiff(),
refList);
break;
}
}
}
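The loop terminates because parseDelimitedFrom returns null once the stream is exhausted. The toy reader below illustrates the same read-until-null convention with a plain int length prefix; real protobuf framing uses a varint, so this format is illustrative only.

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

class DelimitedReaderDemo {
    // Read one length-prefixed record, or null at end of stream.
    static byte[] readDelimited(DataInputStream in) throws IOException {
        int len;
        try {
            len = in.readInt();
        } catch (EOFException eof) {
            return null; // end of section, like parseDelimitedFrom
        }
        byte[] buf = new byte[len];
        in.readFully(buf);
        return buf;
    }

    static List<byte[]> readAll(DataInputStream in) throws IOException {
        List<byte[]> records = new ArrayList<>();
        byte[] record;
        while ((record = readDelimited(in)) != null) {
            records.add(record);
        }
        return records;
    }
}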
Example 5: serializeSnapshotDiffSection
import org.apache.hadoop.hdfs.server.namenode.INodeReference; // import the required package/class
/**
 * Save all the snapshot diffs to the fsimage.
*/
public void serializeSnapshotDiffSection(OutputStream out)
throws IOException {
INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
final List<INodeReference> refList = parent.getSaverContext()
.getRefList();
int i = 0;
Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
while (iter.hasNext()) {
INodeWithAdditionalFields inode = iter.next();
if (inode.isFile()) {
serializeFileDiffList(inode.asFile(), out);
} else if (inode.isDirectory()) {
serializeDirDiffList(inode.asDirectory(), refList, out);
}
++i;
if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
context.checkCancelled();
}
}
parent.commitSection(headers,
FSImageFormatProtobuf.SectionName.SNAPSHOT_DIFF);
}
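Polling for cancellation only every CHECK_CANCEL_INTERVAL inodes keeps the hot loop cheap while still letting a long-running image save abort promptly. A minimal sketch of the pattern; the Canceler interface and interval value here are hypothetical:

class CancelCheckDemo {
    static final int CHECK_CANCEL_INTERVAL = 4096; // hypothetical value

    interface Canceler {
        boolean isCancelled();
    }

    // Check the cancel flag once per interval rather than on every item.
    static void processAll(Iterable<Object> items, Canceler canceler) {
        int i = 0;
        for (Object item : items) {
            // ... per-item work would go here ...
            if (++i % CHECK_CANCEL_INTERVAL == 0 && canceler.isCancelled()) {
                throw new IllegalStateException("save cancelled");
            }
        }
    }
}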
Example 6: findRenameTargetPath
import org.apache.hadoop.hdfs.server.namenode.INodeReference; // import the required package/class
/**
* We just found a deleted WithName node as the source of a rename operation.
 * However, we should include it in our snapshot diff report as a rename only
* if the rename target is also under the same snapshottable directory.
*/
private byte[][] findRenameTargetPath(final INodeDirectory snapshotRoot,
INodeReference.WithName wn, final int snapshotId) {
INode inode = wn.getReferredINode();
final LinkedList<byte[]> ancestors = Lists.newLinkedList();
while (inode != null) {
if (inode == snapshotRoot) {
return ancestors.toArray(new byte[ancestors.size()][]);
}
if (inode instanceof INodeReference.WithCount) {
inode = ((WithCount) inode).getParentRef(snapshotId);
} else {
INode parent = inode.getParentReference() != null ? inode
.getParentReference() : inode.getParent();
if (parent != null && parent instanceof INodeDirectory) {
int sid = parent.asDirectory().searchChild(inode);
if (sid < snapshotId) {
return null;
}
}
if (!(parent instanceof WithCount)) {
ancestors.addFirst(inode.getLocalNameBytes());
}
inode = parent;
}
}
return null;
}
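The walk runs child-to-parent, prepending each local name with addFirst so the collected components end up in root-to-leaf order, and it returns null as soon as the node is proven not to live under the snapshot root. A stripped-down sketch of that upward walk over a hypothetical tree type:

import java.util.LinkedList;

class PathWalkDemo {
    // Hypothetical minimal tree node.
    static class Node {
        final String name;
        final Node parent;
        Node(String name, Node parent) {
            this.name = name;
            this.parent = parent;
        }
    }

    // Collect path components from node up to root, prepending as we climb,
    // just as findRenameTargetPath does with ancestors.addFirst(...).
    static String[] pathUnder(Node root, Node node) {
        LinkedList<String> ancestors = new LinkedList<>();
        for (Node cur = node; cur != null; cur = cur.parent) {
            if (cur == root) {
                return ancestors.toArray(new String[0]);
            }
            ancestors.addFirst(cur.name);
        }
        return null; // node is not under root
    }
}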
Example 7: loadINodeReferenceSection
import org.apache.hadoop.hdfs.server.namenode.INodeReference; // import the required package/class
/**
 * The order of the reference nodes in refList must exactly match their
 * order in the fsimage.
*/
public void loadINodeReferenceSection(InputStream in) throws IOException {
final List<INodeReference> refList = parent.getLoaderContext()
.getRefList();
while (true) {
INodeReferenceSection.INodeReference e = INodeReferenceSection
.INodeReference.parseDelimitedFrom(in);
if (e == null) {
break;
}
INodeReference ref = loadINodeReference(e);
refList.add(ref);
}
}
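Order matters here because later sections identify a reference by its integer position in refList rather than by inode id; loadDeletedList in Example 8 resolves those positions with refList.get(refId). A tiny illustration of index-as-id lookup:

import java.util.ArrayList;
import java.util.List;

class RefListDemo {
    public static void main(String[] args) {
        List<String> refList = new ArrayList<>();
        refList.add("ref-0"); // loaded first, so its id is 0
        refList.add("ref-1"); // loaded second, so its id is 1
        int refId = 1;        // an index persisted elsewhere in the image
        System.out.println(refList.get(refId)); // -> ref-1
    }
}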
Example 8: loadDeletedList
import org.apache.hadoop.hdfs.server.namenode.INodeReference; // import the required package/class
/**
* Load the deleted list in a DirectoryDiff
*/
private List<INode> loadDeletedList(final List<INodeReference> refList,
InputStream in, INodeDirectory dir, List<Long> deletedNodes,
List<Integer> deletedRefNodes)
throws IOException {
List<INode> dlist = new ArrayList<INode>(deletedRefNodes.size()
+ deletedNodes.size());
// load non-reference inodes
for (long deletedId : deletedNodes) {
INode deleted = fsDir.getInode(deletedId);
dlist.add(deleted);
addToDeletedList(deleted, dir);
}
// load reference nodes in the deleted list
for (int refId : deletedRefNodes) {
INodeReference deletedRef = refList.get(refId);
dlist.add(deletedRef);
addToDeletedList(deletedRef, dir);
}
Collections.sort(dlist, new Comparator<INode>() {
@Override
public int compare(INode n1, INode n2) {
return n1.compareTo(n2.getLocalNameBytes());
}
});
return dlist;
}
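The merged list is sorted by local name bytes, the same ordering HDFS uses for directory children, which keeps the deleted list binary-searchable. The comparator below is a self-contained sketch of unsigned lexicographic byte[] comparison of the kind INode.compareTo(byte[]) performs; it is illustrative, not the Hadoop implementation.

import java.util.Arrays;
import java.util.List;

class NameSortDemo {
    // Unsigned lexicographic comparison of name bytes.
    static int compareBytes(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
            int d = (a[i] & 0xff) - (b[i] & 0xff);
            if (d != 0) {
                return d;
            }
        }
        return a.length - b.length;
    }

    public static void main(String[] args) {
        List<byte[]> names = Arrays.asList("b".getBytes(), "a".getBytes());
        names.sort(NameSortDemo::compareBytes);
        System.out.println(new String(names.get(0))); // -> a
    }
}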
Example 9: serializeINodeReferenceSection
import org.apache.hadoop.hdfs.server.namenode.INodeReference; // import the required package/class
/**
 * This can only be called after both the INode_Dir and SnapshotDiff sections have been serialized.
*/
public void serializeINodeReferenceSection(OutputStream out)
throws IOException {
final List<INodeReference> refList = parent.getSaverContext()
.getRefList();
for (INodeReference ref : refList) {
INodeReferenceSection.INodeReference.Builder rb = buildINodeReference(ref);
rb.build().writeDelimitedTo(out);
}
parent.commitSection(headers, SectionName.INODE_REFERENCE);
}
Example 10: buildINodeReference
import org.apache.hadoop.hdfs.server.namenode.INodeReference; // import the required package/class
private INodeReferenceSection.INodeReference.Builder buildINodeReference(
INodeReference ref) throws IOException {
INodeReferenceSection.INodeReference.Builder rb =
INodeReferenceSection.INodeReference.newBuilder().
setReferredId(ref.getId());
if (ref instanceof WithName) {
rb.setLastSnapshotId(((WithName) ref).getLastSnapshotId()).setName(
ByteString.copyFrom(ref.getLocalNameBytes()));
} else if (ref instanceof DstReference) {
rb.setDstSnapshotId(ref.getDstSnapshotId());
}
return rb;
}
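Only one of the two field groups is set per message, and the loader in Example 3 branches on hasDstSnapshotId() to decide which INodeReference subtype to rebuild. A toy mirror of that optional-field discrimination; the record and the UNSET sentinel are hypothetical, since real protobuf tracks field presence internally.

class OptionalFieldDemo {
    static final int UNSET = -1;

    // Toy mirror of INodeReferenceSection.INodeReference: exactly one of the
    // two shapes (dstSnapshotId vs. name + lastSnapshotId) is populated.
    record RefProto(long referredId, int dstSnapshotId,
                    byte[] name, int lastSnapshotId) {
        boolean hasDstSnapshotId() {
            return dstSnapshotId != UNSET;
        }
    }

    static String classify(RefProto r) {
        return r.hasDstSnapshotId() ? "DstReference" : "WithName";
    }
}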