

Java INodeSection Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection. If you are wondering what the INodeSection class is for, how to use it, or where to find usage examples, the curated class code examples below should help.


The INodeSection class belongs to the org.apache.hadoop.hdfs.server.namenode.FsImageProto package. A total of 15 code examples of the INodeSection class are shown below, sorted by popularity by default.
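
Before the individual examples, here is a minimal sketch of the read pattern that the loader and dumper examples below share: one length-delimited INodeSection header is read with parseDelimitedFrom(), followed by exactly getNumInodes() length-delimited INodeSection.INode records, each discriminated via hasFile() / hasDirectory() / hasSymlink(). This sketch is not taken from the Hadoop sources; the class name INodeSectionScanner and the printed output are invented for illustration, and it assumes an InputStream already positioned at (and, if necessary, decompressed for) the INode section of an fsimage file.

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;

public class INodeSectionScanner {
  // Scans one INode section: a length-delimited INodeSection header followed
  // by getNumInodes() length-delimited INodeSection.INode records.
  public static void scan(InputStream in) throws IOException {
    INodeSection header = INodeSection.parseDelimitedFrom(in);
    System.out.println("lastInodeId=" + header.getLastInodeId()
        + ", numInodes=" + header.getNumInodes());

    for (long i = 0; i < header.getNumInodes(); ++i) {
      INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
      if (p.hasFile()) {
        // file inode: replication, mtime/atime, preferred block size, blocks, ...
        System.out.println("file: " + p.getName().toStringUtf8());
      } else if (p.hasDirectory()) {
        // directory inode: quotas, permission, optional ACL/XAttr features, ...
        System.out.println("dir:  " + p.getName().toStringUtf8());
      } else if (p.hasSymlink()) {
        // symlink inode: target and permission
        System.out.println("link: " + p.getName().toStringUtf8());
      }
    }
  }
}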

Example 1: loadRootINode

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void loadRootINode(INodeSection.INode p) {
  INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
  final QuotaCounts q = root.getQuotaCounts();
  final long nsQuota = q.getNameSpace();
  final long dsQuota = q.getStorageSpace();
  if (nsQuota != -1 || dsQuota != -1) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
  }
  final EnumCounters<StorageType> typeQuotas = q.getTypeSpaces();
  if (typeQuotas.anyGreaterOrEqual(0)) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(typeQuotas);
  }
  dir.rootDir.cloneModificationTime(root);
  dir.rootDir.clonePermissionStatus(root);
  // root dir supports having extended attributes according to POSIX
  final XAttrFeature f = root.getXAttrFeature();
  if (f != null) {
    dir.rootDir.addXAttrFeature(f);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: FSImageFormatPBINode.java

Example 2: buildINodeFile

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
public static INodeSection.INodeFile.Builder buildINodeFile(
    INodeFileAttributes file, final SaverContext state) {
  INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
      .setAccessTime(file.getAccessTime())
      .setModificationTime(file.getModificationTime())
      .setPermission(buildPermissionStatus(file, state.getStringMap()))
      .setPreferredBlockSize(file.getPreferredBlockSize())
      .setReplication(file.getFileReplication())
      .setStoragePolicyID(file.getLocalStoragePolicyID());

  AclFeature f = file.getAclFeature();
  if (f != null) {
    b.setAcl(buildAclEntries(f, state.getStringMap()));
  }
  XAttrFeature xAttrFeature = file.getXAttrFeature();
  if (xAttrFeature != null) {
    b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
  }
  return b;
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: FSImageFormatPBINode.java

Example 3: buildINodeDirectory

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
public static INodeSection.INodeDirectory.Builder buildINodeDirectory(
    INodeDirectoryAttributes dir, final SaverContext state) {
  QuotaCounts quota = dir.getQuotaCounts();
  INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory
      .newBuilder().setModificationTime(dir.getModificationTime())
      .setNsQuota(quota.getNameSpace())
      .setDsQuota(quota.getStorageSpace())
      .setPermission(buildPermissionStatus(dir, state.getStringMap()));

  if (quota.getTypeSpaces().anyGreaterOrEqual(0)) {
    b.setTypeQuotas(buildQuotaByStorageTypeEntries(quota));
  }

  AclFeature f = dir.getAclFeature();
  if (f != null) {
    b.setAcl(buildAclEntries(f, state.getStringMap()));
  }
  XAttrFeature xAttrFeature = dir.getXAttrFeature();
  if (xAttrFeature != null) {
    b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
  }
  return b;
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: FSImageFormatPBINode.java

Example 4: serializeINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
void serializeINodeSection(OutputStream out) throws IOException {
  INodeMap inodesMap = fsn.dir.getINodeMap();

  INodeSection.Builder b = INodeSection.newBuilder()
      .setLastInodeId(fsn.dir.getLastInodeId()).setNumInodes(inodesMap.size());
  INodeSection s = b.build();
  s.writeDelimitedTo(out);

  int i = 0;
  Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
  while (iter.hasNext()) {
    INodeWithAdditionalFields n = iter.next();
    save(out, n);
    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE);
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: FSImageFormatPBINode.java
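
The bytes produced by serializeINodeSection() above are what loadINodeSection() (Example 9) and dumpINodeSection() (Example 8) later consume. As an aside not taken from the Hadoop sources (the class name INodeSectionRoundTrip and the inode-id value are made up for illustration), the following self-contained round trip exercises only the protobuf wire pattern involved: writeDelimitedTo() on the save side, mirrored by parseDelimitedFrom() on the load side.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;

public class INodeSectionRoundTrip {
  public static void main(String[] args) throws IOException {
    // Build a section header with illustrative (made-up) values.
    INodeSection header = INodeSection.newBuilder()
        .setLastInodeId(16386L)
        .setNumInodes(0L)
        .build();

    // Save side: length-delimited write, as in serializeINodeSection().
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    header.writeDelimitedTo(out);

    // Load side: length-delimited read, as in loadINodeSection().
    INodeSection parsed = INodeSection.parseDelimitedFrom(
        new ByteArrayInputStream(out.toByteArray()));
    System.out.println("lastInodeId=" + parsed.getLastInodeId()
        + ", numInodes=" + parsed.getNumInodes());
  }
}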

Example 5: save

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void save(OutputStream out, INodeFile n) throws IOException {
  INodeSection.INodeFile.Builder b = buildINodeFile(n,
      parent.getSaverContext());

  if (n.getBlocks() != null) {
    for (Block block : n.getBlocks()) {
      b.addBlocks(PBHelper.convert(block));
    }
  }

  FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
  if (uc != null) {
    INodeSection.FileUnderConstructionFeature f =
        INodeSection.FileUnderConstructionFeature
        .newBuilder().setClientName(uc.getClientName())
        .setClientMachine(uc.getClientMachine()).build();
    b.setFileUC(f);
  }

  INodeSection.INode r = buildINodeCommon(n)
      .setType(INodeSection.INode.Type.FILE).setFile(b).build();
  r.writeDelimitedTo(out);
}
 
Developer ID: naver, Project: hadoop, Lines: 24, Source: FSImageFormatPBINode.java

Example 6: loadDirectoriesInINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
/**
 * Load the filenames of the directories from the INode section.
 */
private void loadDirectoriesInINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  LOG.info("Loading directories in INode section.");
  int numDirs = 0;
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INode p = INode.parseDelimitedFrom(in);
    if (LOG.isDebugEnabled() && i % 10000 == 0) {
      LOG.debug("Scanned {} inodes.", i);
    }
    if (p.hasDirectory()) {
      metadataMap.putDir(p);
      numDirs++;
    }
  }
  LOG.info("Found {} directories in INode section.", numDirs);
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: PBImageTextWriter.java

Example 7: dumpINodeFile

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void dumpINodeFile(INodeSection.INodeFile f) {
  o("replication", f.getReplication()).o("mtime", f.getModificationTime())
      .o("atime", f.getAccessTime())
      .o("perferredBlockSize", f.getPreferredBlockSize())
      .o("permission", dumpPermission(f.getPermission()));

  if (f.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<block>");
      o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
          b.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }

  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", u.getClientName()).o("clientMachine",
        u.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: PBImageXmlWriter.java

Example 8: dumpINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void dumpINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  out.print("<INodeSection>");
  o("lastInodeId", s.getLastInodeId());
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    out.print("<inode>");
    o("id", p.getId()).o("type", p.getType()).o("name",
        p.getName().toStringUtf8());

    if (p.hasFile()) {
      dumpINodeFile(p.getFile());
    } else if (p.hasDirectory()) {
      dumpINodeDirectory(p.getDirectory());
    } else if (p.hasSymlink()) {
      dumpINodeSymlink(p.getSymlink());
    }

    out.print("</inode>\n");
  }
  out.print("</INodeSection>\n");
}
 
Developer ID: naver, Project: hadoop, Lines: 23, Source: PBImageXmlWriter.java

Example 9: loadINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
void loadINodeSection(InputStream in, StartupProgress prog,
    Step currentStep) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  fsn.dir.resetLastInodeId(s.getLastInodeId());
  long numInodes = s.getNumInodes();
  LOG.info("Loading " + numInodes + " INodes.");
  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
  for (int i = 0; i < numInodes; ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getId() == INodeId.ROOT_INODE_ID) {
      loadRootINode(p);
    } else {
      INode n = loadINode(p);
      dir.addToInodeMap(n);
    }
    counter.increment();
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 20, Source: FSImageFormatPBINode.java

Example 10: loadRootINode

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void loadRootINode(INodeSection.INode p) {
  INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
  final QuotaCounts q = root.getQuotaCounts();
  final long nsQuota = q.getNameSpace();
  final long dsQuota = q.getStorageSpace();
  if (nsQuota != -1 || dsQuota != -1) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
  }
  final EnumCounters<StorageType> typeQuotas = q.getTypeSpaces();
  if (typeQuotas.anyGreaterOrEqual(0)) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(typeQuotas);
  }
  dir.rootDir.cloneModificationTime(root);
  dir.rootDir.clonePermissionStatus(root);
  final AclFeature af = root.getFeature(AclFeature.class);
  if (af != null) {
    dir.rootDir.addAclFeature(af);
  }
  // root dir supports having extended attributes according to POSIX
  final XAttrFeature f = root.getXAttrFeature();
  if (f != null) {
    dir.rootDir.addXAttrFeature(f);
  }
  dir.addRootDirToEncryptionZone(f);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: FSImageFormatPBINode.java

Example 11: buildINodeFile

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
public static INodeSection.INodeFile.Builder buildINodeFile(
    INodeFileAttributes file, final SaverContext state) {
  INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
      .setAccessTime(file.getAccessTime())
      .setModificationTime(file.getModificationTime())
      .setPermission(buildPermissionStatus(file, state.getStringMap()))
      .setPreferredBlockSize(file.getPreferredBlockSize())
      .setReplication(file.getFileReplication())
      .setStoragePolicyID(file.getLocalStoragePolicyID())
      .setIsStriped(file.isStriped());

  AclFeature f = file.getAclFeature();
  if (f != null) {
    b.setAcl(buildAclEntries(f, state.getStringMap()));
  }
  XAttrFeature xAttrFeature = file.getXAttrFeature();
  if (xAttrFeature != null) {
    b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
  }
  return b;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 22, Source: FSImageFormatPBINode.java

Example 12: save

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void save(OutputStream out, INodeFile n) throws IOException {
  INodeSection.INodeFile.Builder b = buildINodeFile(n,
      parent.getSaverContext());
  BlockInfo[] blocks = n.getBlocks();

  if (blocks != null) {
    for (Block block : n.getBlocks()) {
      b.addBlocks(PBHelperClient.convert(block));
    }
  }

  FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
  if (uc != null) {
    INodeSection.FileUnderConstructionFeature f =
        INodeSection.FileUnderConstructionFeature
        .newBuilder().setClientName(uc.getClientName())
        .setClientMachine(uc.getClientMachine()).build();
    b.setFileUC(f);
  }

  INodeSection.INode r = buildINodeCommon(n)
      .setType(INodeSection.INode.Type.FILE).setFile(b).build();
  r.writeDelimitedTo(out);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 25, Source: FSImageFormatPBINode.java

Example 13: dumpINodeFile

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void dumpINodeFile(INodeSection.INodeFile f) {
  o("replication", f.getReplication()).o("mtime", f.getModificationTime())
      .o("atime", f.getAccessTime())
      .o("preferredBlockSize", f.getPreferredBlockSize())
      .o("permission", dumpPermission(f.getPermission()));
  dumpAcls(f.getAcl());
  if (f.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto b : f.getBlocksList()) {
      out.print("<block>");
      o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
          b.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }

  if (f.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature u = f.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", u.getClientName()).o("clientMachine",
        u.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 26, Source: PBImageXmlWriter.java

Example 14: loadINodeDirectory

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
    LoaderContext state) {
  assert n.getType() == INodeSection.INode.Type.DIRECTORY;
  INodeSection.INodeDirectory d = n.getDirectory();

  final PermissionStatus permissions = loadPermission(d.getPermission(),
      state.getStringTable());
  final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName()
      .toByteArray(), permissions, d.getModificationTime());

  final long nsQuota = d.getNsQuota(), dsQuota = d.getDsQuota();
  if (nsQuota >= 0 || dsQuota >= 0) {
    dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
  }

  if (d.hasAcl()) {
    dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
        state.getStringTable())));
  }
  if (d.hasXAttrs()) {
    dir.addXAttrFeature(new XAttrFeature(
        loadXAttrs(d.getXAttrs(), state.getStringTable())));
  }
  return dir;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 26, Source: FSImageFormatPBINode.java

Example 15: loadRootINode

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the required package/class
private void loadRootINode(INodeSection.INode p) {
  INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
  final Quota.Counts q = root.getQuotaCounts();
  final long nsQuota = q.get(Quota.NAMESPACE);
  final long dsQuota = q.get(Quota.DISKSPACE);
  if (nsQuota != -1 || dsQuota != -1) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
  }
  dir.rootDir.cloneModificationTime(root);
  dir.rootDir.clonePermissionStatus(root);
  // root dir supports having extended attributes according to POSIX
  final XAttrFeature f = root.getXAttrFeature();
  if (f != null) {
    dir.rootDir.addXAttrFeature(f);
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 17, Source: FSImageFormatPBINode.java


Note: The org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use should follow the license of the corresponding project. Do not reproduce this article without permission.