

Java ReadOnlyList.size Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.util.ReadOnlyList.size. If you are unsure what ReadOnlyList.size does, how to call it, or what working code looks like, the curated snippets below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.hdfs.util.ReadOnlyList.


The following 15 code examples of the ReadOnlyList.size method are shown below, ordered by popularity.
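Before the examples, here is a minimal, self-contained sketch of the method itself. ReadOnlyList is an unmodifiable list abstraction used inside the HDFS namenode; size() reports the element count and typically drives index-based loops together with get(int). The sketch assumes the ReadOnlyList.Util.asReadOnlyList wrapper, which exists in the Hadoop versions the examples below come from; the list contents are made up for illustration.

import java.util.Arrays;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

public class ReadOnlyListSizeDemo {
  public static void main(String[] args) {
    // Wrap an ordinary java.util.List in the read-only view.
    ReadOnlyList<String> names =
        ReadOnlyList.Util.asReadOnlyList(Arrays.asList("s1", "s2", "s3"));
    // size() + get(int) is the dominant traversal pattern in the examples below.
    for (int i = 0; i < names.size(); i++) {
      System.out.println(names.get(i));
    }
    System.out.println("total = " + names.size()); // prints: total = 3
  }
}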

Example 1: checkSnapshotList

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
/**
 * Check the correctness of the snapshot list within a snapshottable directory
 */
private void checkSnapshotList(INodeDirectory srcRoot,
    String[] sortedNames, String[] names) {
  assertTrue(srcRoot.isSnapshottable());
  ReadOnlyList<Snapshot> listByName = srcRoot
      .getDirectorySnapshottableFeature().getSnapshotList();
  assertEquals(sortedNames.length, listByName.size());
  for (int i = 0; i < listByName.size(); i++) {
    assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
  }
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  for (int i = 0; i < listByTime.size(); i++) {
    Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
        listByTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 21, Source: TestSnapshotRename.java
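A hedged sketch of how a test might drive this helper (the snapshot names and the hdfs/fsdir handles are illustrative, not taken from the original test): renaming a snapshot reorders the name-sorted list but not the creation-order diff list, which is exactly the divergence checkSnapshotList asserts.

// Inside a snapshot test: hdfs is a DistributedFileSystem backed by a
// MiniDFSCluster, fsdir is the namenode's FSDirectory, dir a snapshottable Path.
hdfs.createSnapshot(dir, "s1");
hdfs.createSnapshot(dir, "s2");
hdfs.renameSnapshot(dir, "s2", "s0"); // "s0" now sorts before "s1"
checkSnapshotList(fsdir.getINode(dir.toString()).asDirectory(),
    new String[] { "s0", "s1" },  // expected order by name
    new String[] { "s1", "s0" }); // expected order by creation time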

Example 2: getSnapshotsListing

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
    throws UnresolvedLinkException, IOException {
  Preconditions.checkState(hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
  
  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
  
  final INode node = this.getINode(dirPath);
  final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
      .valueOf(node, dirPath);
  final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null);
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 29, Source: FSDirectory.java
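The skipSize arithmetic above follows the java.util.Collections.binarySearch convention: a non-negative result means startAfter itself is present and must be skipped (+1), while a negative result encodes the insertion point as -(insertionPoint) - 1, i.e. the index of the first snapshot greater than startAfter. A standalone sketch of the same arithmetic, using Collections.binarySearch over plain strings instead of the HDFS helper:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SkipSizeDemo {
  static int skipSize(List<String> sorted, String startAfter) {
    int idx = Collections.binarySearch(sorted, startAfter);
    // Found: resume just past the match. Not found: -idx - 1 is the
    // insertion point, the first element greater than startAfter.
    return idx < 0 ? -idx - 1 : idx + 1;
  }

  public static void main(String[] args) {
    List<String> snapshots = Arrays.asList("s1", "s3", "s5");
    System.out.println(skipSize(snapshots, "s3")); // 2: next page starts at "s5"
    System.out.println(skipSize(snapshots, "s2")); // 1: next page starts at "s3"
  }
}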

Example 3: getSnapshotsListing

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = FSDirectory.normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, INodesInPath.fromINode(sRoot));
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Developer ID: naver, Project: hadoop, Lines: 36, Source: FSDirStatAndListingOp.java

Example 4: serializeINodeDirectorySection

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
void serializeINodeDirectorySection(OutputStream out) throws IOException {
  Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
      .getINodeMap().getMapIterator();
  final ArrayList<INodeReference> refList = parent.getSaverContext()
      .getRefList();
  int i = 0;
  while (iter.hasNext()) {
    INodeWithAdditionalFields n = iter.next();
    if (!n.isDirectory()) {
      continue;
    }

    ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
        Snapshot.CURRENT_STATE_ID);
    if (children.size() > 0) {
      INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
          DirEntry.newBuilder().setParent(n.getId());
      for (INode inode : children) {
        if (!inode.isReference()) {
          b.addChildren(inode.getId());
        } else {
          refList.add(inode.asReference());
          b.addRefChildren(refList.size() - 1);
        }
      }
      INodeDirectorySection.DirEntry e = b.build();
      e.writeDelimitedTo(out);
    }

    ++i;
    if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
      context.checkCancelled();
    }
  }
  parent.commitSection(summary,
      FSImageFormatProtobuf.SectionName.INODE_DIR);
}
 
Developer ID: naver, Project: hadoop, Lines: 38, Source: FSImageFormatPBINode.java
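Two patterns in this saver are worth calling out. Each DirEntry is written with writeDelimitedTo, so entries are length-prefixed and can be read back one at a time (protobuf's parseDelimitedFrom); and reference-type children are appended to a shared refList with only their ordinal recorded in the entry, since the references themselves are serialized later in a separate section. A plain-Java sketch of the ordinal bookkeeping and the periodic cancellation check (class and constant names are illustrative, not Hadoop API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SaverLoopSketch {
  static final int CHECK_CANCEL_INTERVAL = 4096; // assumed interval value

  public static void main(String[] args) {
    List<String> refList = new ArrayList<>();
    int processed = 0;
    for (String child : Arrays.asList("a", "ref:b", "c")) {
      if (child.startsWith("ref:")) {
        refList.add(child);               // serialized later, in its own section
        int ordinal = refList.size() - 1; // the entry records only this index
        System.out.println("refChild #" + ordinal);
      } else {
        System.out.println("child " + child);
      }
      // Poll for cancellation every CHECK_CANCEL_INTERVAL iterations rather
      // than on each one, mirroring context.checkCancelled() above.
      if (++processed % CHECK_CANCEL_INTERVAL == 0) {
        System.out.println("would call context.checkCancelled() here");
      }
    }
  }
}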

Example 5: computeNeeded

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
/**
 * Computes the needed number of bytes and files for a path.
 * @return CacheDirectiveStats describing the needed stats for this path
 */
private CacheDirectiveStats computeNeeded(String path, short replication) {
  FSDirectory fsDir = namesystem.getFSDirectory();
  INode node;
  long requestedBytes = 0;
  long requestedFiles = 0;
  CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
  try {
    node = fsDir.getINode(path);
  } catch (UnresolvedLinkException e) {
    // We don't cache through symlinks
    return builder.build();
  }
  if (node == null) {
    return builder.build();
  }
  if (node.isFile()) {
    requestedFiles = 1;
    INodeFile file = node.asFile();
    requestedBytes = file.computeFileSize();
  } else if (node.isDirectory()) {
    INodeDirectory dir = node.asDirectory();
    ReadOnlyList<INode> children = dir
        .getChildrenList(Snapshot.CURRENT_STATE_ID);
    requestedFiles = children.size();
    for (INode child : children) {
      if (child.isFile()) {
        requestedBytes += child.asFile().computeFileSize();
      }
    }
  }
  return new CacheDirectiveStats.Builder()
      .setBytesNeeded(requestedBytes)
      .setFilesNeeded(requestedFiles) // "needed" stats, per the javadoc contract
      .build();
}
 
Developer ID: naver, Project: hadoop, Lines: 40, Source: CacheManager.java

Example 6: computeDirectoryContentSummary

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
protected ContentSummaryComputationContext computeDirectoryContentSummary(
    ContentSummaryComputationContext summary, int snapshotId) {
  ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
  // Explicit traversing is done to enable repositioning after relinquishing
  // and reacquiring locks.
  for (int i = 0;  i < childrenList.size(); i++) {
    INode child = childrenList.get(i);
    byte[] childName = child.getLocalNameBytes();

    long lastYieldCount = summary.getYieldCount();
    child.computeContentSummary(summary);

    // Check whether the computation was paused in the subtree.
    // The counts may be off, but traversing the rest of the children
    // should still be safe.
    if (lastYieldCount == summary.getYieldCount()) {
      continue;
    }
    // The locks were released and reacquired. Check parent first.
    if (getParent() == null) {
      // Stop further counting and return whatever we have so far.
      break;
    }
    // Obtain the children list again since it may have been modified.
    childrenList = getChildrenList(snapshotId);
    // Reposition in case the children list has changed. Decrement i by 1
    // since the loop will increment it.
    i = nextChild(childrenList, childName) - 1;
  }

  // Increment the directory count for this directory.
  summary.getCounts().addContent(Content.DIRECTORY, 1);
  // Relinquish and reacquire locks if necessary.
  summary.yield();
  return summary;
}
 
Developer ID: naver, Project: hadoop, Lines: 37, Source: INodeDirectory.java
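The repositioning step deserves a closer look. If summary.getYieldCount() changed, locks were dropped mid-traversal and the children list may have been edited, so the code re-fetches the list and binary-searches the last processed name to find where to resume; the - 1 compensates for the loop's own i++. A standalone sketch of that resume logic over a plain sorted list (nextChild here mirrors the arithmetic of the INodeDirectory helper but is a plain-string stand-in):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class RepositionDemo {
  // Index of the first entry strictly greater than name, like
  // INodeDirectory.nextChild but over plain strings.
  static int nextChild(List<String> sortedChildren, String name) {
    int idx = Collections.binarySearch(sortedChildren, name);
    return idx < 0 ? -idx - 1 : idx + 1;
  }

  public static void main(String[] args) {
    // We had just processed "c" before yielding; meanwhile "c" was deleted.
    List<String> children = Arrays.asList("a", "b", "d");
    int resumeAt = nextChild(children, "c");
    // Prints "d": no child is skipped or double-counted after the yield.
    System.out.println(children.get(resumeAt));
  }
}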

Example 7: getSnapshotsListing

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = FSDirectory.normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    INodeAttributes nodeAttrs = getINodeAttributes(
        fsd, src, sRoot.getLocalNameBytes(),
        node, Snapshot.CURRENT_STATE_ID);
    listing[i] = createFileStatus(
        fsd, sRoot.getLocalNameBytes(),
        sRoot, nodeAttrs,
        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
        Snapshot.CURRENT_STATE_ID, false,
        INodesInPath.fromINode(sRoot));
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 42, Source: FSDirStatAndListingOp.java

Example 8: computeDirectoryContentSummary

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
protected ContentSummaryComputationContext computeDirectoryContentSummary(
    ContentSummaryComputationContext summary, int snapshotId) {
  ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
  // Explicit traversing is done to enable repositioning after relinquishing
  // and reacquiring locks.
  for (int i = 0;  i < childrenList.size(); i++) {
    INode child = childrenList.get(i);
    byte[] childName = child.getLocalNameBytes();

    long lastYieldCount = summary.getYieldCount();
    child.computeContentSummary(snapshotId, summary);

    // Check whether the computation was paused in the subtree.
    // The counts may be off, but traversing the rest of the children
    // should still be safe.
    if (lastYieldCount == summary.getYieldCount()) {
      continue;
    }
    // The locks were released and reacquired. Check parent first.
    if (!isRoot() && getParent() == null) {
      // Stop further counting and return whatever we have so far.
      break;
    }
    // Obtain the children list again since it may have been modified.
    childrenList = getChildrenList(snapshotId);
    // Reposition in case the children list has changed. Decrement i by 1
    // since the loop will increment it.
    i = nextChild(childrenList, childName) - 1;
  }

  // Increment the directory count for this directory.
  summary.getCounts().addContent(Content.DIRECTORY, 1);
  // Relinquish and reacquire locks if necessary.
  summary.yield();
  return summary;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 37, Source: INodeDirectory.java

Example 9: getSnapshotsListing

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
    throws UnresolvedLinkException, IOException {
  Preconditions.checkState(hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
  
  final INode node = this.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, null);
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 35, Source: FSDirectory.java

Example 10: computeDirectoryContentSummary

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
protected ContentSummaryComputationContext computeDirectoryContentSummary(
    ContentSummaryComputationContext summary, int snapshotId) {
  ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
  // Explicit traversing is done to enable repositioning after relinquishing
  // and reacquiring locks.
  for (int i = 0;  i < childrenList.size(); i++) {
    INode child = childrenList.get(i);
    byte[] childName = child.getLocalNameBytes();

    long lastYieldCount = summary.getYieldCount();
    child.computeContentSummary(summary);

    // Check whether the computation was paused in the subtree.
    // The counts may be off, but traversing the rest of the children
    // should still be safe.
    if (lastYieldCount == summary.getYieldCount()) {
      continue;
    }
    // The locks were released and reacquired. Check parent first.
    if (getParent() == null) {
      // Stop further counting and return whatever we have so far.
      break;
    }
    // Obtain the children list again since it may have been modified.
    childrenList = getChildrenList(snapshotId);
    // Reposition in case the children list has changed. Decrement i by 1
    // since the loop will increment it.
    i = nextChild(childrenList, childName) - 1;
  }

  // Increment the directory count for this directory.
  summary.getCounts().add(Content.DIRECTORY, 1);
  // Relinquish and reacquire locks if necessary.
  summary.yield();
  return summary;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 37, Source: INodeDirectory.java

Example 11: getListing

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
/**
 * Get a partial listing of the indicated directory
 *
 * @param src the directory name
 * @param startAfter the name to start listing after
 * @param needLocation whether block locations should be returned
 * @return a partial listing starting after startAfter
 */
DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) throws UnresolvedLinkException, IOException {
  String srcs = normalizePath(src);

  readLock();
  try {
    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
      return getSnapshotsListing(srcs, startAfter);
    }
    final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, true);
    final Snapshot snapshot = inodesInPath.getPathSnapshot();
    final INode targetNode = inodesInPath.getINode(0);
    if (targetNode == null)
      return null;
    
    if (!targetNode.isDirectory()) {
      return new DirectoryListing(
          new HdfsFileStatus[]{createFileStatus(HdfsFileStatus.EMPTY_NAME,
              targetNode, needLocation, snapshot)}, 0);
    }

    final INodeDirectory dirInode = targetNode.asDirectory();
    final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
    int startChild = INodeDirectory.nextChild(contents, startAfter);
    int totalNumChildren = contents.size();
    int numOfListing = Math.min(totalNumChildren - startChild, this.lsLimit);
    HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing; i++) {
      INode cur = contents.get(startChild + i);
      listing[i] = createFileStatus(cur.getLocalNameBytes(), cur,
          needLocation, snapshot);
    }
    return new DirectoryListing(
        listing, totalNumChildren - startChild - numOfListing);
  } finally {
    readUnlock();
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 47, Source: FSDirectory.java
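Since the listing is partial, callers page through it by feeding the last returned name back in as startAfter. A hedged sketch of that loop (getListing is private above, so treat the call itself as illustrative; the DirectoryListing accessors getPartialListing, getLastName, and hasMore match the Hadoop versions these examples come from, and "/data/dir" is a made-up path):

byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
DirectoryListing batch;
do {
  batch = getListing("/data/dir", startAfter, false);
  if (batch == null) {
    break; // the path does not exist
  }
  for (HdfsFileStatus status : batch.getPartialListing()) {
    System.out.println(status.getLocalName());
  }
  startAfter = batch.getLastName(); // resume after the last entry returned
} while (batch.hasMore());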

Example 12: checkSnapshotList

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
/**
 * Check the correctness of the snapshot list within
 * {@link INodeDirectorySnapshottable}
 */
private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,
    String[] sortedNames, String[] names) {
  ReadOnlyList<Snapshot> listByName = srcRoot.getSnapshotsByNames();
  assertEquals(sortedNames.length, listByName.size());
  for (int i = 0; i < listByName.size(); i++) {
    assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
  }
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  for (int i = 0; i < listByTime.size(); i++) {
    assertEquals(names[i], listByTime.get(i).getSnapshot().getRoot().getLocalName());
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines: 18, Source: TestSnapshotRename.java

Example 13: computeDirectoryContentSummary

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
ContentSummaryComputationContext computeDirectoryContentSummary(
    ContentSummaryComputationContext summary) {
  ReadOnlyList<INode> childrenList = getChildrenList(Snapshot.CURRENT_STATE_ID);
  // Explicit traversing is done to enable repositioning after relinquishing
  // and reacquiring locks.
  for (int i = 0;  i < childrenList.size(); i++) {
    INode child = childrenList.get(i);
    byte[] childName = child.getLocalNameBytes();

    long lastYieldCount = summary.getYieldCount();
    child.computeContentSummary(summary);

    // Check whether the computation was paused in the subtree.
    // The counts may be off, but traversing the rest of the children
    // should still be safe.
    if (lastYieldCount == summary.getYieldCount()) {
      continue;
    }
    // The locks were released and reacquired. Check parent first.
    if (getParent() == null) {
      // Stop further counting and return whatever we have so far.
      break;
    }
    // Obtain the children list again since it may have been modified.
    childrenList = getChildrenList(Snapshot.CURRENT_STATE_ID);
    // Reposition in case the children list has changed. Decrement i by 1
    // since the loop will increment it.
    i = nextChild(childrenList, childName) - 1;
  }

  // Increment the directory count for this directory.
  summary.getCounts().add(Content.DIRECTORY, 1);
  // Relinquish and reacquire locks if necessary.
  summary.yield();
  return summary;
}
 
Developer ID: yncxcw, Project: FlexMap, Lines: 37, Source: INodeDirectory.java

Example 14: getSnapshotsListing

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private DirectoryListing getSnapshotsListing(String src, byte[] startAfter)
    throws UnresolvedLinkException, IOException {
  Preconditions.checkState(hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
  
  final String dirPath = normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
  
  final INode node = this.getINode(dirPath);
  final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable
      .valueOf(node, dirPath);
  final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit);
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot,
        Snapshot.CURRENT_STATE_ID);
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines: 30, Source: FSDirectory.java

Example 15: checkSnapshotList

import org.apache.hadoop.hdfs.util.ReadOnlyList; // import required by the method below
/**
 * Check the correctness of the snapshot list within
 * {@link INodeDirectorySnapshottable}
 */
private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,
    String[] sortedNames, String[] names) {
  ReadOnlyList<Snapshot> listByName = srcRoot.getSnapshotsByNames();
  assertEquals(sortedNames.length, listByName.size());
  for (int i = 0; i < listByName.size(); i++) {
    assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
  }
  List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, listByTime.size());
  for (int i = 0; i < listByTime.size(); i++) {
    Snapshot s = srcRoot.getSnapshotById(listByTime.get(i).getSnapshotId());
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines: 19, Source: TestSnapshotRename.java


Note: the org.apache.hadoop.hdfs.util.ReadOnlyList.size examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects and copyright remains with their original authors; consult each project's License before redistributing or reusing the code, and do not repost without permission.