This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.parseDelimitedFrom. If you are wondering what INodeSection.parseDelimitedFrom does and how to use it, the curated code examples below should help; you can also look further into the enclosing class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection, for more context.
The following presents 9 code examples of INodeSection.parseDelimitedFrom, sorted by popularity by default.
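All nine examples share one pattern: a single call to INodeSection.parseDelimitedFrom reads the length-delimited section header (a varint length prefix followed by the serialized INodeSection message), and the nested INodeSection.INode.parseDelimitedFrom is then called getNumInodes() times to read the inode records that follow. Here is a minimal sketch of that pattern (the method name countInodeTypes is hypothetical, and the stream is assumed to be already positioned at the start of the INode section of an fsimage file):

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;

private void countInodeTypes(InputStream in) throws IOException {
  // parseDelimitedFrom reads a varint length prefix, then that many bytes,
  // and parses them as one INodeSection message (the section header).
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  long files = 0, dirs = 0, symlinks = 0;
  for (long i = 0; i < s.getNumInodes(); ++i) {
    // Each inode record that follows is itself length-delimited.
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.hasFile()) {
      ++files;
    } else if (p.hasDirectory()) {
      ++dirs;
    } else if (p.hasSymlink()) {
      ++symlinks;
    }
  }
  System.out.println(files + " files, " + dirs + " directories, "
      + symlinks + " symlinks");
}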
Example 1: loadDirectoriesInINodeSection
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
/**
 * Load the filenames of the directories from the INode section.
 */
private void loadDirectoriesInINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  LOG.info("Loading directories in INode section.");
  int numDirs = 0;
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INode p = INode.parseDelimitedFrom(in);
    if (LOG.isDebugEnabled() && i % 10000 == 0) {
      LOG.debug("Scanned {} inodes.", i);
    }
    if (p.hasDirectory()) {
      metadataMap.putDir(p);
      numDirs++;
    }
  }
  LOG.info("Found {} directories in INode section.", numDirs);
}
Example 2: dumpINodeSection
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void dumpINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  out.print("<INodeSection>");
  o("lastInodeId", s.getLastInodeId());
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    out.print("<inode>");
    o("id", p.getId()).o("type", p.getType()).o("name",
        p.getName().toStringUtf8());
    if (p.hasFile()) {
      dumpINodeFile(p.getFile());
    } else if (p.hasDirectory()) {
      dumpINodeDirectory(p.getDirectory());
    } else if (p.hasSymlink()) {
      dumpINodeSymlink(p.getSymlink());
    }
    out.print("</inode>\n");
  }
  out.print("</INodeSection>\n");
}
Example 3: loadINodeSection
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
void loadINodeSection(InputStream in, StartupProgress prog,
    Step currentStep) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  fsn.dir.resetLastInodeId(s.getLastInodeId());
  long numInodes = s.getNumInodes();
  LOG.info("Loading " + numInodes + " INodes.");
  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
  for (int i = 0; i < numInodes; ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getId() == INodeId.ROOT_INODE_ID) {
      loadRootINode(p);
    } else {
      INode n = loadINode(p);
      dir.addToInodeMap(n);
    }
    counter.increment();
  }
}
Example 4: loadINodeSection
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
void loadINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  fsn.dir.resetLastInodeId(s.getLastInodeId());
  LOG.info("Loading " + s.getNumInodes() + " INodes.");
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getId() == INodeId.ROOT_INODE_ID) {
      loadRootINode(p);
    } else {
      INode n = loadINode(p);
      dir.addToInodeMap(n);
    }
  }
}
Example 5: outputINodes
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void outputINodes(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  LOG.info("Found {} INodes in the INode section", s.getNumInodes());
  long ignored = 0;
  long ignoredSnapshots = 0;
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INode p = INode.parseDelimitedFrom(in);
    try {
      String parentPath = metadataMap.getParentPath(p.getId());
      out.println(getEntry(parentPath, p));
    } catch (IOException ioe) {
      ignored++;
      if (!(ioe instanceof IgnoreSnapshotException)) {
        LOG.warn("Exception caught, ignoring node:{}", p.getId(), ioe);
      } else {
        ignoredSnapshots++;
        if (LOG.isDebugEnabled()) {
          LOG.debug("Exception caught, ignoring node:{}.", p.getId(), ioe);
        }
      }
    }
    if (LOG.isDebugEnabled() && i % 100000 == 0) {
      LOG.debug("Outputted {} INodes.", i);
    }
  }
  if (ignored > 0) {
    LOG.warn("Ignored {} nodes, including {} in snapshots. Please turn on"
        + " debug log for details", ignored, ignoredSnapshots);
  }
  LOG.info("Outputted {} INodes.", s.getNumInodes());
}
Example 6: run
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();
      int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
          .ceil((double) fileSize / steps);
      ++distribution[bucket];
    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }
    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
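A note on the histogram bucketing in Example 6: a file larger than maxSize falls into the last bucket; otherwise the bucket index is ceil(fileSize / steps), so bucket k (for k >= 1) covers sizes in ((k-1)*steps, k*steps] and empty files land in bucket 0. For instance, with a step of 2 MB (the concrete value of steps is not shown here and depends on how the calculator is configured), a 3 MB file goes to bucket ceil(3/2) = 2.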
Example 7: outputINodes
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void outputINodes(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  LOG.info("Found {} INodes in the INode section", s.getNumInodes());
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INode p = INode.parseDelimitedFrom(in);
    String parentPath = metadataMap.getParentPath(p.getId());
    out.println(getEntry(parentPath, p));
    if (LOG.isDebugEnabled() && i % 100000 == 0) {
      LOG.debug("Outputted {} INodes.", i);
    }
  }
  LOG.info("Outputted {} INodes.", s.getNumInodes());
}
Example 8: loadINodeSection
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
void loadINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  fsn.resetLastInodeId(s.getLastInodeId());
  LOG.info("Loading " + s.getNumInodes() + " INodes.");
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getId() == INodeId.ROOT_INODE_ID) {
      loadRootINode(p);
    } else {
      INode n = loadINode(p);
      dir.addToInodeMap(n);
    }
  }
}
Example 9: loadINodeSection
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void loadINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Found " + s.getNumInodes() + " inodes in inode section");
  }
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    inodes.put(p.getId(), p);
    if (LOG.isTraceEnabled()) {
      LOG.trace("Loaded inode id " + p.getId() + " type " + p.getType()
          + " name '" + p.getName().toStringUtf8() + "'");
    }
  }
}