当前位置: 首页>>代码示例>>Java>>正文


Java INodeSection.parseDelimitedFrom方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.parseDelimitedFrom方法的典型用法代码示例。如果您正苦于以下问题:Java INodeSection.parseDelimitedFrom方法的具体用法?Java INodeSection.parseDelimitedFrom怎么用?Java INodeSection.parseDelimitedFrom使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection的用法示例。


在下文中一共展示了INodeSection.parseDelimitedFrom方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: loadDirectoriesInINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; //导入方法依赖的package包/类
/**
 * Load the filenames of the directories from the INode section.
 *
 * Scans every inode serialized after the section header and records the
 * ones that are directories in {@code metadataMap}.
 *
 * @param in stream positioned at the start of a delimited INodeSection
 * @throws IOException if the section or an inode cannot be parsed
 */
private void loadDirectoriesInINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  LOG.info("Loading directories in INode section.");
  // long counters: getNumInodes() returns a long, so an int loop index
  // would overflow (and never terminate) for images with more than
  // Integer.MAX_VALUE inodes.
  long numDirs = 0;
  for (long i = 0; i < s.getNumInodes(); ++i) {
    INode p = INode.parseDelimitedFrom(in);
    if (LOG.isDebugEnabled() && i % 10000 == 0) {
      LOG.debug("Scanned {} inodes.", i);
    }
    if (p.hasDirectory()) {
      metadataMap.putDir(p);
      numDirs++;
    }
  }
  LOG.info("Found {} directories in INode section.", numDirs);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:PBImageTextWriter.java

示例2: dumpINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; //导入方法依赖的package包/类
/**
 * Write the INode section of the fsimage as XML: a {@code <INodeSection>}
 * element containing the last allocated inode id followed by one
 * {@code <inode>} element per inode (file, directory, or symlink).
 *
 * @param in stream positioned at the start of a delimited INodeSection
 * @throws IOException if the section or an inode cannot be parsed
 */
private void dumpINodeSection(InputStream in) throws IOException {
  INodeSection section = INodeSection.parseDelimitedFrom(in);
  out.print("<INodeSection>");
  o("lastInodeId", section.getLastInodeId());
  for (int idx = 0; idx < section.getNumInodes(); ++idx) {
    INodeSection.INode inode = INodeSection.INode.parseDelimitedFrom(in);
    out.print("<inode>");
    o("id", inode.getId()).o("type", inode.getType()).o("name",
        inode.getName().toStringUtf8());

    // Exactly one of these sub-messages is set, depending on inode type.
    if (inode.hasFile()) {
      dumpINodeFile(inode.getFile());
    } else if (inode.hasDirectory()) {
      dumpINodeDirectory(inode.getDirectory());
    } else if (inode.hasSymlink()) {
      dumpINodeSymlink(inode.getSymlink());
    }

    out.print("</inode>\n");
  }
  out.print("</INodeSection>\n");
}
 
开发者ID:naver,项目名称:hadoop,代码行数:23,代码来源:PBImageXmlWriter.java

示例3: loadINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; //导入方法依赖的package包/类
/**
 * Load the INode section of the fsimage: restore the last allocated inode
 * id, then read every inode, routing the root inode to
 * {@link #loadRootINode} and adding all others to the inode map while
 * reporting progress through {@code prog}.
 *
 * @param in stream positioned at the start of a delimited INodeSection
 * @param prog startup progress tracker to report per-inode progress to
 * @param currentStep the loading step being tracked
 * @throws IOException if the section or an inode cannot be parsed
 */
void loadINodeSection(InputStream in, StartupProgress prog,
    Step currentStep) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  fsn.dir.resetLastInodeId(s.getLastInodeId());
  long numInodes = s.getNumInodes();
  LOG.info("Loading " + numInodes + " INodes.");
  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
  // long index: numInodes is a long, so an int counter would overflow
  // (and loop forever) for images with more than Integer.MAX_VALUE inodes.
  for (long i = 0; i < numInodes; ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getId() == INodeId.ROOT_INODE_ID) {
      loadRootINode(p);
    } else {
      INode n = loadINode(p);
      dir.addToInodeMap(n);
    }
    counter.increment();
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:20,代码来源:FSImageFormatPBINode.java

示例4: loadINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; //导入方法依赖的package包/类
/**
 * Load the INode section of the fsimage: restore the last allocated inode
 * id, then read every inode, routing the root inode to
 * {@link #loadRootINode} and registering all others in the inode map.
 *
 * @param in stream positioned at the start of a delimited INodeSection
 * @throws IOException if the section or an inode cannot be parsed
 */
void loadINodeSection(InputStream in) throws IOException {
  INodeSection section = INodeSection.parseDelimitedFrom(in);
  fsn.dir.resetLastInodeId(section.getLastInodeId());
  LOG.info("Loading " + section.getNumInodes() + " INodes.");
  for (int idx = 0; idx < section.getNumInodes(); ++idx) {
    INodeSection.INode entry = INodeSection.INode.parseDelimitedFrom(in);
    // The root inode gets special handling; everything else goes
    // straight into the inode map.
    if (entry.getId() != INodeId.ROOT_INODE_ID) {
      dir.addToInodeMap(loadINode(entry));
    } else {
      loadRootINode(entry);
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:15,代码来源:FSImageFormatPBINode.java

示例5: outputINodes

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; //导入方法依赖的package包/类
/**
 * Emit one text line per inode in the INode section, resolving each
 * inode's parent path from the metadata map. Inodes whose parent path
 * cannot be resolved are counted and skipped; snapshot-related skips are
 * tallied separately and only logged at debug level.
 *
 * @param in stream positioned at the start of a delimited INodeSection
 * @throws IOException if the section or an inode cannot be parsed
 */
private void outputINodes(InputStream in) throws IOException {
  INodeSection section = INodeSection.parseDelimitedFrom(in);
  LOG.info("Found {} INodes in the INode section", section.getNumInodes());
  long skipped = 0;
  long skippedSnapshots = 0;
  for (int idx = 0; idx < section.getNumInodes(); ++idx) {
    INode inode = INode.parseDelimitedFrom(in);
    try {
      out.println(getEntry(metadataMap.getParentPath(inode.getId()), inode));
    } catch (IOException e) {
      skipped++;
      // Snapshot inodes are an expected skip; keep them quiet unless
      // debug logging is on. Anything else is worth a warning.
      if (e instanceof IgnoreSnapshotException) {
        skippedSnapshots++;
        if (LOG.isDebugEnabled()) {
          LOG.debug("Exception caught, ignoring node:{}.", inode.getId(), e);
        }
      } else {
        LOG.warn("Exception caught, ignoring node:{}", inode.getId(), e);
      }
    }

    if (LOG.isDebugEnabled() && idx % 100000 == 0) {
      LOG.debug("Outputted {} INodes.", idx);
    }
  }
  if (skipped > 0) {
    LOG.warn("Ignored {} nodes, including {} in snapshots. Please turn on"
            + " debug log for details", skipped, skippedSnapshots);
  }
  LOG.info("Outputted {} INodes.", section.getNumInodes());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:33,代码来源:PBImageTextWriter.java

示例6: run

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; //导入方法依赖的package包/类
/**
 * Scan the INode section and accumulate file-distribution statistics:
 * totals for files, directories, blocks and consumed space, the maximum
 * file size, and a histogram of file sizes bucketed by {@code steps}.
 *
 * @param in stream positioned at the start of a delimited INodeSection
 * @throws IOException if the section or an inode cannot be parsed
 */
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  // long index: getNumInodes() returns a long, so an int counter would
  // overflow (and loop forever) for images with more than
  // Integer.MAX_VALUE inodes.
  for (long i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      // A file's logical size is the sum of its block sizes.
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      // Raw space consumed counts every replica.
      totalSpace += fileSize * f.getReplication();

      // Files larger than maxSize all land in the final overflow bucket;
      // otherwise bucket by size rounded up to the next step boundary.
      int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
          .ceil((double)fileSize / steps);
      ++distribution[bucket];

    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }

    // Progress heartbeat every 2^20 inodes (also fires once at i == 0).
    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:29,代码来源:FileDistributionCalculator.java

示例7: outputINodes

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; //导入方法依赖的package包/类
/**
 * Emit one text line per inode in the INode section, resolving each
 * inode's parent path from the metadata map.
 *
 * @param in stream positioned at the start of a delimited INodeSection
 * @throws IOException if the section, an inode, or a parent path cannot
 *         be resolved
 */
private void outputINodes(InputStream in) throws IOException {
  INodeSection section = INodeSection.parseDelimitedFrom(in);
  LOG.info("Found {} INodes in the INode section", section.getNumInodes());
  for (int idx = 0; idx < section.getNumInodes(); ++idx) {
    INode inode = INode.parseDelimitedFrom(in);
    String parentDir = metadataMap.getParentPath(inode.getId());
    out.println(getEntry(parentDir, inode));

    if (LOG.isDebugEnabled() && idx % 100000 == 0) {
      LOG.debug("Outputted {} INodes.", idx);
    }
  }
  LOG.info("Outputted {} INodes.", section.getNumInodes());
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:15,代码来源:PBImageTextWriter.java

示例8: loadINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; //导入方法依赖的package包/类
/**
 * Load the INode section of the fsimage: restore the last allocated inode
 * id on the namesystem, then read every inode, routing the root inode to
 * {@link #loadRootINode} and registering all others in the inode map.
 *
 * @param in stream positioned at the start of a delimited INodeSection
 * @throws IOException if the section or an inode cannot be parsed
 */
void loadINodeSection(InputStream in) throws IOException {
  INodeSection section = INodeSection.parseDelimitedFrom(in);
  // NOTE: this variant resets the id on the namesystem itself, not on
  // fsn.dir.
  fsn.resetLastInodeId(section.getLastInodeId());
  LOG.info("Loading " + section.getNumInodes() + " INodes.");
  for (int idx = 0; idx < section.getNumInodes(); ++idx) {
    INodeSection.INode entry = INodeSection.INode.parseDelimitedFrom(in);
    // The root inode gets special handling; everything else goes
    // straight into the inode map.
    if (entry.getId() != INodeId.ROOT_INODE_ID) {
      dir.addToInodeMap(loadINode(entry));
    } else {
      loadRootINode(entry);
    }
  }
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:15,代码来源:FSImageFormatPBINode.java

示例9: loadINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; //导入方法依赖的package包/类
/**
 * Read the INode section and cache every inode message in the
 * {@code inodes} map, keyed by inode id.
 *
 * @param in stream positioned at the start of a delimited INodeSection
 * @throws IOException if the section or an inode cannot be parsed
 */
private void loadINodeSection(InputStream in) throws IOException {
  INodeSection section = INodeSection.parseDelimitedFrom(in);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Found " + section.getNumInodes() + " inodes in inode section");
  }
  for (int idx = 0; idx < section.getNumInodes(); ++idx) {
    INodeSection.INode entry = INodeSection.INode.parseDelimitedFrom(in);
    inodes.put(entry.getId(), entry);
    if (LOG.isTraceEnabled()) {
      LOG.trace("Loaded inode id " + entry.getId() + " type " + entry.getType()
          + " name '" + entry.getName().toStringUtf8() + "'");
    }
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:15,代码来源:LsrPBImage.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.parseDelimitedFrom方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。