

Java INodeSection.INode Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode. If you are wondering what INodeSection.INode does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.


The 15 code examples of INodeSection.INode shown below are sorted by popularity by default.
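
All of the examples revolve around the same serialization pattern: the INode section of an fsimage is a length-delimited sequence of protobuf messages, read with INodeSection.INode.parseDelimitedFrom and written back with writeDelimitedTo. As a warm-up, here is a minimal sketch of that shared read loop in isolation. It is not taken from any of the examples: the class name INodeSectionScanner is made up, and the input stream is assumed to be already positioned at the start of the INode section (a real reader obtains the section offset from the fsimage's FileSummary).

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;

public class INodeSectionScanner {
  public static void scan(InputStream in) throws IOException {
    // The section header carries the total inode count and the last allocated inode id.
    INodeSection s = INodeSection.parseDelimitedFrom(in);
    for (long i = 0; i < s.getNumInodes(); ++i) {
      // Each inode follows as its own length-delimited INodeSection.INode message.
      INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
      System.out.println(p.getId() + "\t" + p.getType() + "\t"
          + p.getName().toStringUtf8());
    }
  }
}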

Example 1: loadRootINode

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private void loadRootINode(INodeSection.INode p) {
  INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
  final QuotaCounts q = root.getQuotaCounts();
  final long nsQuota = q.getNameSpace();
  final long dsQuota = q.getStorageSpace();
  if (nsQuota != -1 || dsQuota != -1) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
  }
  final EnumCounters<StorageType> typeQuotas = q.getTypeSpaces();
  if (typeQuotas.anyGreaterOrEqual(0)) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(typeQuotas);
  }
  dir.rootDir.cloneModificationTime(root);
  dir.rootDir.clonePermissionStatus(root);
  // root dir supports having extended attributes according to POSIX
  final XAttrFeature f = root.getXAttrFeature();
  if (f != null) {
    dir.rootDir.addXAttrFeature(f);
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 21, Source file: FSImageFormatPBINode.java

Example 2: save

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private void save(OutputStream out, INodeFile n) throws IOException {
  INodeSection.INodeFile.Builder b = buildINodeFile(n,
      parent.getSaverContext());

  if (n.getBlocks() != null) {
    for (Block block : n.getBlocks()) {
      b.addBlocks(PBHelper.convert(block));
    }
  }

  FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
  if (uc != null) {
    INodeSection.FileUnderConstructionFeature f =
        INodeSection.FileUnderConstructionFeature
        .newBuilder().setClientName(uc.getClientName())
        .setClientMachine(uc.getClientMachine()).build();
    b.setFileUC(f);
  }

  INodeSection.INode r = buildINodeCommon(n)
      .setType(INodeSection.INode.Type.FILE).setFile(b).build();
  r.writeDelimitedTo(out);
}
 
Developer: naver, Project: hadoop, Lines of code: 24, Source file: FSImageFormatPBINode.java

Example 3: dumpINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private void dumpINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  out.print("<INodeSection>");
  o("lastInodeId", s.getLastInodeId());
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    out.print("<inode>");
    o("id", p.getId()).o("type", p.getType()).o("name",
        p.getName().toStringUtf8());

    if (p.hasFile()) {
      dumpINodeFile(p.getFile());
    } else if (p.hasDirectory()) {
      dumpINodeDirectory(p.getDirectory());
    } else if (p.hasSymlink()) {
      dumpINodeSymlink(p.getSymlink());
    }

    out.print("</inode>\n");
  }
  out.print("</INodeSection>\n");
}
 
Developer: naver, Project: hadoop, Lines of code: 23, Source file: PBImageXmlWriter.java
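
Usage note: dumpINodeSection above belongs to PBImageXmlWriter, the XML processor of the HDFS offline image viewer, so the XML it emits is what you get from a command along the lines of `hdfs oiv -p XML -i <fsimage file> -o fsimage.xml` (exact flags may vary by Hadoop release; check the oiv usage output for your version).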

Example 4: loadINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
void loadINodeSection(InputStream in, StartupProgress prog,
    Step currentStep) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  fsn.dir.resetLastInodeId(s.getLastInodeId());
  long numInodes = s.getNumInodes();
  LOG.info("Loading " + numInodes + " INodes.");
  prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
  for (int i = 0; i < numInodes; ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getId() == INodeId.ROOT_INODE_ID) {
      loadRootINode(p);
    } else {
      INode n = loadINode(p);
      dir.addToInodeMap(n);
    }
    counter.increment();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 20, Source file: FSImageFormatPBINode.java

Example 5: loadRootINode

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private void loadRootINode(INodeSection.INode p) {
  INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
  final QuotaCounts q = root.getQuotaCounts();
  final long nsQuota = q.getNameSpace();
  final long dsQuota = q.getStorageSpace();
  if (nsQuota != -1 || dsQuota != -1) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
  }
  final EnumCounters<StorageType> typeQuotas = q.getTypeSpaces();
  if (typeQuotas.anyGreaterOrEqual(0)) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(typeQuotas);
  }
  dir.rootDir.cloneModificationTime(root);
  dir.rootDir.clonePermissionStatus(root);
  final AclFeature af = root.getFeature(AclFeature.class);
  if (af != null) {
    dir.rootDir.addAclFeature(af);
  }
  // root dir supports having extended attributes according to POSIX
  final XAttrFeature f = root.getXAttrFeature();
  if (f != null) {
    dir.rootDir.addXAttrFeature(f);
  }
  dir.addRootDirToEncryptionZone(f);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 26, Source file: FSImageFormatPBINode.java

Example 6: save

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private void save(OutputStream out, INodeFile n) throws IOException {
  INodeSection.INodeFile.Builder b = buildINodeFile(n,
      parent.getSaverContext());
  BlockInfo[] blocks = n.getBlocks();

  if (blocks != null) {
    for (Block block : n.getBlocks()) {
      b.addBlocks(PBHelperClient.convert(block));
    }
  }

  FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
  if (uc != null) {
    INodeSection.FileUnderConstructionFeature f =
        INodeSection.FileUnderConstructionFeature
        .newBuilder().setClientName(uc.getClientName())
        .setClientMachine(uc.getClientMachine()).build();
    b.setFileUC(f);
  }

  INodeSection.INode r = buildINodeCommon(n)
      .setType(INodeSection.INode.Type.FILE).setFile(b).build();
  r.writeDelimitedTo(out);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 25, Source file: FSImageFormatPBINode.java

Example 7: loadINodeDirectory

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
    LoaderContext state) {
  assert n.getType() == INodeSection.INode.Type.DIRECTORY;
  INodeSection.INodeDirectory d = n.getDirectory();

  final PermissionStatus permissions = loadPermission(d.getPermission(),
      state.getStringTable());
  final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName()
      .toByteArray(), permissions, d.getModificationTime());

  final long nsQuota = d.getNsQuota(), dsQuota = d.getDsQuota();
  if (nsQuota >= 0 || dsQuota >= 0) {
    dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
  }

  if (d.hasAcl()) {
    dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
        state.getStringTable())));
  }
  if (d.hasXAttrs()) {
    dir.addXAttrFeature(new XAttrFeature(
        loadXAttrs(d.getXAttrs(), state.getStringTable())));
  }
  return dir;
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 26, Source file: FSImageFormatPBINode.java
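
As a complement to the loader above, here is a hedged sketch of the write side: building and serializing a directory inode directly with the generated protobuf builders (Hadoop's own saver goes through helpers such as buildINodeCommon, visible in Examples 2, 6, and 10). Everything in it is illustrative: the class name DirectoryINodeWriter is made up, the quota and permission values are placeholders (a real image stores a string-table-encoded permission), and com.google.protobuf.ByteString assumes the unshaded protobuf used by these Hadoop versions.

import java.io.IOException;
import java.io.OutputStream;

import com.google.protobuf.ByteString;

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;

public class DirectoryINodeWriter {
  public static void writeDirectory(OutputStream out, long inodeId, String name,
      long mtime) throws IOException {
    // -1 for the quotas means "no quota": the loaders above only add the
    // quota feature when nsQuota or dsQuota is >= 0.
    INodeSection.INodeDirectory d = INodeSection.INodeDirectory.newBuilder()
        .setModificationTime(mtime)
        .setNsQuota(-1)
        .setDsQuota(-1)
        .setPermission(0) // placeholder only; see the lead-in note
        .build();
    INodeSection.INode inode = INodeSection.INode.newBuilder()
        .setId(inodeId)
        .setName(ByteString.copyFromUtf8(name))
        .setType(INodeSection.INode.Type.DIRECTORY)
        .setDirectory(d)
        .build();
    // Same framing the loaders expect: one length-delimited INode message.
    inode.writeDelimitedTo(out);
  }
}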

Example 8: loadRootINode

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private void loadRootINode(INodeSection.INode p) {
  INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
  final Quota.Counts q = root.getQuotaCounts();
  final long nsQuota = q.get(Quota.NAMESPACE);
  final long dsQuota = q.get(Quota.DISKSPACE);
  if (nsQuota != -1 || dsQuota != -1) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
  }
  dir.rootDir.cloneModificationTime(root);
  dir.rootDir.clonePermissionStatus(root);
  // root dir supports having extended attributes according to POSIX
  final XAttrFeature f = root.getXAttrFeature();
  if (f != null) {
    dir.rootDir.addXAttrFeature(f);
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 17, Source file: FSImageFormatPBINode.java

Example 9: loadINodeDirectory

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
    LoaderContext state) {
  assert n.getType() == INodeSection.INode.Type.DIRECTORY;
  INodeSection.INodeDirectory d = n.getDirectory();

  final PermissionStatus permissions = loadPermission(d.getPermission(),
      state.getStringTable());
  final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName()
      .toByteArray(), permissions, d.getModificationTime());

  final long nsQuota = d.getNsQuota(), dsQuota = d.getDsQuota();
  if (nsQuota >= 0 || dsQuota >= 0) {
    dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
  }

  if (d.hasAcl()) {
    dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
        state.getStringTable())));
  }
  return dir;
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines of code: 22, Source file: FSImageFormatPBINode.java

Example 10: save

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private void save(OutputStream out, INodeFile n) throws IOException {
  INodeSection.INodeFile.Builder b = buildINodeFile(n,
      parent.getSaverContext());

  for (Block block : n.getBlocks()) {
    b.addBlocks(PBHelper.convert(block));
  }

  FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
  if (uc != null) {
    INodeSection.FileUnderConstructionFeature f =
        INodeSection.FileUnderConstructionFeature
        .newBuilder().setClientName(uc.getClientName())
        .setClientMachine(uc.getClientMachine()).build();
    b.setFileUC(f);
  }

  INodeSection.INode r = buildINodeCommon(n)
      .setType(INodeSection.INode.Type.FILE).setFile(b).build();
  r.writeDelimitedTo(out);
}
 
Developer: Seagate, Project: hadoop-on-lustre2, Lines of code: 22, Source file: FSImageFormatPBINode.java

Example 11: loadINodeSection

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
void loadINodeSection(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  fsn.dir.resetLastInodeId(s.getLastInodeId());
  LOG.info("Loading " + s.getNumInodes() + " INodes.");
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getId() == INodeId.ROOT_INODE_ID) {
      loadRootINode(p);
    } else {
      INode n = loadINode(p);
      dir.addToInodeMap(n);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 15, Source file: FSImageFormatPBINode.java

Example 12: loadINode

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private INode loadINode(INodeSection.INode n) {
  switch (n.getType()) {
  case FILE:
    return loadINodeFile(n);
  case DIRECTORY:
    return loadINodeDirectory(n, parent.getLoaderContext());
  case SYMLINK:
    return loadINodeSymlink(n);
  default:
    break;
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines of code: 14, Source file: FSImageFormatPBINode.java

Example 13: loadINodeSymlink

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private INodeSymlink loadINodeSymlink(INodeSection.INode n) {
  assert n.getType() == INodeSection.INode.Type.SYMLINK;
  INodeSection.INodeSymlink s = n.getSymlink();
  final PermissionStatus permissions = loadPermission(s.getPermission(),
      parent.getLoaderContext().getStringTable());
  INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
      permissions, s.getModificationTime(), s.getAccessTime(),
      s.getTarget().toStringUtf8());
  return sym;
}
 
Developer: naver, Project: hadoop, Lines of code: 11, Source file: FSImageFormatPBINode.java

Example 14: run

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private void run(InputStream in) throws IOException {
  INodeSection s = INodeSection.parseDelimitedFrom(in);
  for (int i = 0; i < s.getNumInodes(); ++i) {
    INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
    if (p.getType() == INodeSection.INode.Type.FILE) {
      ++totalFiles;
      INodeSection.INodeFile f = p.getFile();
      totalBlocks += f.getBlocksCount();
      long fileSize = 0;
      for (BlockProto b : f.getBlocksList()) {
        fileSize += b.getNumBytes();
      }
      maxFileSize = Math.max(fileSize, maxFileSize);
      totalSpace += fileSize * f.getReplication();

      int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
          .ceil((double)fileSize / steps);
      ++distribution[bucket];

    } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
      ++totalDirectories;
    }

    // Print progress every 2^20 (~1 million) inodes.
    if (i % (1 << 20) == 0) {
      out.println("Processed " + i + " inodes.");
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 29, Source file: FileDistributionCalculator.java
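
Usage note: this run loop is the core of the offline image viewer's FileDistribution processor (`hdfs oiv -p FileDistribution ...`). The fields it relies on (totalFiles, totalBlocks, totalSpace, distribution, maxSize, steps, and so on) are assumed to be members of the surrounding calculator class: maxSize and steps define the histogram, files larger than maxSize are counted in the last bucket, and every other file lands in bucket ceil(fileSize / steps).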

Example 15: loadINodeSymlink

import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection; // import the package/class this method depends on
private INodeSymlink loadINodeSymlink(INodeSection.INode n) {
  assert n.getType() == INodeSection.INode.Type.SYMLINK;
  INodeSection.INodeSymlink s = n.getSymlink();
  final PermissionStatus permissions = loadPermission(s.getPermission(),
      parent.getLoaderContext().getStringTable());

  INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
      permissions, s.getModificationTime(), s.getAccessTime(),
      s.getTarget().toStringUtf8());

  return sym;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 13, Source file: FSImageFormatPBINode.java


Note: the org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and the source code copyright remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code; do not reproduce this article without permission.