当前位置: 首页>>代码示例>>Java>>正文


Java Snapshot.Root方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root方法的典型用法代码示例。如果您正苦于以下问题:Java Snapshot.Root方法的具体用法?Java Snapshot.Root怎么用?Java Snapshot.Root使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot的用法示例。


在下文中一共展示了Snapshot.Root方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getSnapshotsListing

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入方法依赖的package包/类
/**
 * Build a directory listing of the snapshot roots under a snapshottable
 * directory, resuming after {@code startAfter} and capped at the
 * namesystem's ls limit.
 *
 * @param src path ending in the ".snapshot" separator component
 * @throws SnapshotException if the resolved directory is not snapshottable
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  // Drop the trailing ".snapshot" component to recover the directory path.
  final int dirEnd = src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length();
  final String dirPath = FSDirectory.normalizePath(src.substring(0, dirEnd));

  final INodeDirectory dirNode =
      INodeDirectory.valueOf(fsd.getINode(dirPath), dirPath);
  final DirectorySnapshottableFeature sf =
      dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }

  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  // binarySearch returns (-insertionPoint - 1) on a miss; either way the
  // listing resumes immediately after the startAfter key.
  final int found = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  final int skip = found < 0 ? -found - 1 : found + 1;
  final int count = Math.min(snapshots.size() - skip, fsd.getLsLimit());

  final HdfsFileStatus[] entries = new HdfsFileStatus[count];
  for (int idx = 0; idx < count; idx++) {
    final Snapshot.Root root = snapshots.get(skip + idx).getRoot();
    entries[idx] = createFileStatus(fsd, src, root.getLocalNameBytes(), root,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, INodesInPath.fromINode(root));
  }
  // Remaining-entries count tells the client how much is left to page through.
  return new DirectoryListing(entries, snapshots.size() - skip - count);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:36,代码来源:FSDirStatAndListingOp.java

示例2: getSnapshotsListing

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入方法依赖的package包/类
/**
 * Build a directory listing of the snapshot roots under a snapshottable
 * directory, resuming after {@code startAfter} and capped at the
 * namesystem's ls limit. Attribute-provider-aware variant.
 *
 * @param src path ending in the ".snapshot" separator component
 * @throws SnapshotException if the resolved directory is not snapshottable
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  // Drop the trailing ".snapshot" component to recover the directory path.
  final int dirEnd = src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length();
  final String dirPath = FSDirectory.normalizePath(src.substring(0, dirEnd));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf =
      dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }

  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  // binarySearch returns (-insertionPoint - 1) on a miss; either way the
  // listing resumes immediately after the startAfter key.
  final int found = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  final int skip = found < 0 ? -found - 1 : found + 1;
  final int count = Math.min(snapshots.size() - skip, fsd.getLsLimit());

  final HdfsFileStatus[] entries = new HdfsFileStatus[count];
  for (int idx = 0; idx < count; idx++) {
    final Snapshot.Root root = snapshots.get(skip + idx).getRoot();
    // NOTE(review): attributes are resolved against the parent directory
    // inode ("node"), not the snapshot root — presumably intentional so the
    // attribute provider sees the snapshottable dir; confirm against caller.
    final INodeAttributes attrs = getINodeAttributes(
        fsd, src, root.getLocalNameBytes(),
        node, Snapshot.CURRENT_STATE_ID);
    entries[idx] = createFileStatus(
        fsd, root.getLocalNameBytes(),
        root, attrs,
        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
        Snapshot.CURRENT_STATE_ID, false,
        INodesInPath.fromINode(root));
  }
  // Remaining-entries count tells the client how much is left to page through.
  return new DirectoryListing(entries, snapshots.size() - skip - count);
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:42,代码来源:FSDirStatAndListingOp.java

示例3: dumpTreeRecursively

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入方法依赖的package包/类
@VisibleForTesting
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
    final int snapshot) {
  // Emit this directory's own line, then augment it with child count,
  // quota feature, and (for snapshot roots) the snapshot id.
  super.dumpTreeRecursively(out, prefix, snapshot);
  out.print(", childrenSize=" + getChildrenList(snapshot).size());
  final DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
  if (quota != null) {
    out.print(", " + quota);
  }
  if (this instanceof Snapshot.Root) {
    out.print(", snapshotId=" + snapshot);
  }
  out.println();

  // Replace the trailing two-character tree connector with blanks so that
  // children are indented under this node rather than beside it.
  if (prefix.length() >= 2) {
    prefix.setLength(prefix.length() - 2);
    prefix.append("  ");
  }
  dumpTreeRecursively(out, prefix, new Iterable<SnapshotAndINode>() {
    // Deliberately a single shared iterator: every Iterator this Iterable
    // hands out drains the same underlying child list exactly once.
    final Iterator<INode> children = getChildrenList(snapshot).iterator();

    @Override
    public Iterator<SnapshotAndINode> iterator() {
      return new Iterator<SnapshotAndINode>() {
        @Override
        public boolean hasNext() {
          return children.hasNext();
        }

        @Override
        public SnapshotAndINode next() {
          // Wrap each child with the snapshot id being dumped.
          return new SnapshotAndINode(snapshot, children.next());
        }

        @Override
        public void remove() {
          throw new UnsupportedOperationException();
        }
      };
    }
  });

  // Snapshottable directories also dump their snapshot subtree.
  final DirectorySnapshottableFeature snapshottable =
      getDirectorySnapshottableFeature();
  if (snapshottable != null) {
    snapshottable.dumpTreeRecursively(this, out, prefix, snapshot);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:49,代码来源:INodeDirectory.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。