

Java INode.asDirectory Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.INode.asDirectory. If you are unsure what INode.asDirectory does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.INode.


The following 10 code examples of INode.asDirectory are shown below, sorted by popularity by default.
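Before the examples, here is a minimal sketch of the pattern they all share: guard with isDirectory() before calling asDirectory(), since asDirectory() on a file or symlink inode fails. The listChildrenIfDirectory helper name is hypothetical; the INode, INodeDirectory, ReadOnlyList, and Snapshot APIs it calls are the same ones used in the examples below.

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

// Hypothetical helper: "inode" stands in for any INode obtained from the
// namesystem (e.g. via FSDirectory.getINode, as in Example 5 below).
static void listChildrenIfDirectory(INode inode) {
  // asDirectory() assumes the inode really is a directory; check first.
  if (inode != null && inode.isDirectory()) {
    INodeDirectory dir = inode.asDirectory();
    // List the children visible in the current (non-snapshot) state.
    ReadOnlyList<INode> children =
        dir.getChildrenList(Snapshot.CURRENT_STATE_ID);
    for (INode child : children) {
      System.out.println(child.getLocalName());
    }
  }
}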

Example 1: findLatestSnapshot

import org.apache.hadoop.hdfs.server.namenode.INode; // import the dependent package/class
/**
 * Find the latest snapshot that 1) covers the given inode (which means the
 * snapshot was either taken on the inode or taken on an ancestor of the
 * inode), and 2) was taken before the given snapshot (if the given snapshot 
 * is not null).
 * 
 * @param inode the given inode that the returned snapshot needs to cover
 * @param anchor the returned snapshot must have been taken before this id.
 * @return id of the latest snapshot that covers the given inode and was
 *         taken before the given snapshot (if it is not null).
 */
public static int findLatestSnapshot(INode inode, final int anchor) {
  int latest = NO_SNAPSHOT_ID;
  for(; inode != null; inode = inode.getParent()) {
    if (inode.isDirectory()) {
      final INodeDirectory dir = inode.asDirectory();
      if (dir.isWithSnapshot()) {
        latest = dir.getDiffs().updatePrior(anchor, latest);
      }
    }
  }
  return latest;
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: Snapshot.java

Example 2: read

import org.apache.hadoop.hdfs.server.namenode.INode; // import the dependent package/class
static Snapshot read(DataInput in, FSImageFormat.Loader loader)
    throws IOException {
  final int snapshotId = in.readInt();
  final INode root = loader.loadINodeWithLocalName(false, in, false);
  return new Snapshot(snapshotId, root.asDirectory(), null);
}
 
Developer: naver, Project: hadoop, Lines: 7, Source: Snapshot.java

Example 3: destroyDstSubtree

import org.apache.hadoop.hdfs.server.namenode.INode; // import the dependent package/class
/**
 * Destroy a subtree under a DstReference node.
 */
public static void destroyDstSubtree(
    final BlockStoragePolicySuite bsps, INode inode, final int snapshot,
    final int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) throws QuotaExceededException {
  Preconditions.checkArgument(prior != Snapshot.NO_SNAPSHOT_ID);
  if (inode.isReference()) {
    if (inode instanceof INodeReference.WithName
        && snapshot != Snapshot.CURRENT_STATE_ID) {
      // this inode has been renamed before the deletion of the DstReference
      // subtree
      inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
    } else { 
      // for DstReference node, continue this process to its subtree
      destroyDstSubtree(bsps, inode.asReference().getReferredINode(), snapshot,
          prior, collectedBlocks, removedINodes);
    }
  } else if (inode.isFile()) {
    inode.cleanSubtree(bsps, snapshot, prior, collectedBlocks, removedINodes);
  } else if (inode.isDirectory()) {
    Map<INode, INode> excludedNodes = null;
    INodeDirectory dir = inode.asDirectory();
    DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
    if (sf != null) {
      DirectoryDiffList diffList = sf.getDiffs();
      DirectoryDiff priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
        excludedNodes = cloneDiffList(dList);
      }
      
      if (snapshot != Snapshot.CURRENT_STATE_ID) {
        diffList.deleteSnapshotDiff(bsps, snapshot, prior, dir, collectedBlocks,
            removedINodes);
      }
      priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        priorDiff.diff.destroyCreatedList(bsps, dir, collectedBlocks,
            removedINodes);
      }
    }
    for (INode child : inode.asDirectory().getChildrenList(prior)) {
      if (excludedNodes != null && excludedNodes.containsKey(child)) {
        continue;
      }
      destroyDstSubtree(bsps, child, snapshot, prior, collectedBlocks,
          removedINodes);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: DirectoryWithSnapshotFeature.java

Example 4: cleanDeletedINode

import org.apache.hadoop.hdfs.server.namenode.INode; // import the dependent package/class
/**
 * Clean an inode while we move it from the deleted list of post to the
 * deleted list of prior.
 * @param bsps The block storage policy suite.
 * @param inode The inode to clean.
 * @param post The post snapshot.
 * @param prior The id of the prior snapshot.
 * @param collectedBlocks Used to collect blocks for later deletion.
 * @return Quota usage update.
 */
private static QuotaCounts cleanDeletedINode(
    final BlockStoragePolicySuite bsps, INode inode,
    final int post, final int prior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  Deque<INode> queue = new ArrayDeque<INode>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        INodeReference.WithCount wc =
            (INodeReference.WithCount) wn.getReferredINode();
        if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
          // this wn is the last wn inside of the wc, and the dstRef node has
          // been deleted. In this case, we should treat the referred file/dir
          // as a normal case
          queue.add(wc.getReferredINode());
        } else {
          wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
        }
      }
      // For DstReference node, since the node is not in the created list of
      // prior, we should treat it as regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      INodeFile file = topNode.asFile();
      counts.add(file.getDiffs().deleteSnapshotDiff(bsps, post, prior, file,
          collectedBlocks, removedINodes));
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
              collectedBlocks, removedINodes));
        }
      }
      
      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null
            && priorChildrenDiff.search(ListType.DELETED,
                child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
  return counts;
}
 
Developer: naver, Project: hadoop, Lines: 68, Source: DirectoryWithSnapshotFeature.java

Example 5: rescanCacheDirectives

import org.apache.hadoop.hdfs.server.namenode.INode; // import the dependent package/class
/**
 * Scan all CacheDirectives.  Use the information to figure out
 * what cache replication factor each block should have.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  final long now = new Date().getTime();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
           directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      LOG.debug("Directive {}: got UnresolvedLinkException while resolving "
              + "path {}", directive.getId(), path
      );
      continue;
    }
    if (node == null)  {
      LOG.debug("Directive {}: No inode found at {}", directive.getId(),
          path);
    } else if (node.isDirectory()) {
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ",
          directive.getId(), node);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 47, Source: CacheReplicationMonitor.java

Example 6: testRenameAndDelete

import org.apache.hadoop.hdfs.server.namenode.INode; // import the dependent package/class
@Test
public void testRenameAndDelete() throws IOException {
  final Path foo = new Path("/foo");
  final Path x = new Path(foo, "x");
  final Path y = new Path(foo, "y");
  final Path trash = new Path("/trash");
  hdfs.mkdirs(x);
  hdfs.mkdirs(y);
  final long parentId = fsdir.getINode4Write(y.toString()).getId();

  hdfs.mkdirs(trash);
  hdfs.allowSnapshot(foo);
  // 1. create snapshot s0
  hdfs.createSnapshot(foo, "s0");
  // 2. create file /foo/x/bar
  final Path file = new Path(x, "bar");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, (short) 1, 0L);
  final long fileId = fsdir.getINode4Write(file.toString()).getId();
  // 3. move file into /foo/y
  final Path newFile = new Path(y, "bar");
  hdfs.rename(file, newFile);
  // 4. create snapshot s1
  hdfs.createSnapshot(foo, "s1");
  // 5. move /foo/y to /trash
  final Path deletedY = new Path(trash, "y");
  hdfs.rename(y, deletedY);
  // 6. create snapshot s2
  hdfs.createSnapshot(foo, "s2");
  // 7. delete /trash/y
  hdfs.delete(deletedY, true);
  // 8. delete snapshot s1
  hdfs.deleteSnapshot(foo, "s1");

  // make sure bar has been removed from its parent
  INode p = fsdir.getInode(parentId);
  Assert.assertNotNull(p);
  INodeDirectory pd = p.asDirectory();
  Assert.assertNotNull(pd);
  Assert.assertNull(pd.getChild("bar".getBytes(), Snapshot.CURRENT_STATE_ID));

  // make sure bar has been cleaned from inodeMap
  Assert.assertNull(fsdir.getInode(fileId));
}
 
Developer: naver, Project: hadoop, Lines: 44, Source: TestSnapshotDeletion.java

Example 7: testRenameDirAndDeleteSnapshot_6

import org.apache.hadoop.hdfs.server.namenode.INode; // import the dependent package/class
/**
 * Rename a directory and delete a snapshot under the same snapshottable directory.
 */
@Test
public void testRenameDirAndDeleteSnapshot_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  
  final Path foo = new Path(dir2, "foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
  
  // take a snapshot on /test
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  
  // delete /test/dir2/foo/bar/file after snapshot s0, so that there is a 
  // snapshot copy recorded in bar
  hdfs.delete(file, true);
  
  // rename foo from dir2 to dir1
  final Path newfoo = new Path(dir1, foo.getName());
  hdfs.rename(foo, newfoo);
  
  final Path foo_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
      "dir2/foo");
  assertTrue("the snapshot path " + foo_s0 + " should exist",
      hdfs.exists(foo_s0));
  
  // delete snapshot s0. The deletion will first go down through dir1, and 
  // find foo in the created list of dir1. Then it will use null as the prior
  // snapshot and continue the snapshot deletion process in the subtree of 
  // foo. We need to make sure the snapshot s0 can be deleted cleanly in the
  // foo subtree.
  hdfs.deleteSnapshot(test, "s0");
  // check the internal state
  assertFalse("after deleting s0, " + foo_s0 + " should not exist",
      hdfs.exists(foo_s0));
  INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
      .asDirectory();
  assertTrue("the diff list of " + dir2
      + " should be empty after deleting s0", dir2Node.getDiffs().asList()
      .isEmpty());
  
  assertTrue(hdfs.exists(newfoo));
  INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
  assertTrue(fooRefNode instanceof INodeReference.DstReference);
  INodeDirectory fooNode = fooRefNode.asDirectory();
  // fooNode should be still INodeDirectory (With Snapshot) since we call
  // recordModification before the rename
  assertTrue(fooNode.isWithSnapshot());
  assertTrue(fooNode.getDiffs().asList().isEmpty());
  INodeDirectory barNode = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID)
      .get(0).asDirectory();
  // bar should also be INodeDirectory (With Snapshot), and both of its diff 
  // list and children list are empty 
  assertTrue(barNode.getDiffs().asList().isEmpty());
  assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
  
  restartClusterAndCheckImage(true);
}
 
Developer: naver, Project: hadoop, Lines: 65, Source: TestRenameWithSnapshots.java

Example 8: destroyDstSubtree

import org.apache.hadoop.hdfs.server.namenode.INode; // import the dependent package/class
/**
 * Destroy a subtree under a DstReference node.
 */
public static void destroyDstSubtree(INode.ReclaimContext reclaimContext,
    INode inode, final int snapshot, final int prior) {
  Preconditions.checkArgument(prior != NO_SNAPSHOT_ID);
  if (inode.isReference()) {
    if (inode instanceof INodeReference.WithName
        && snapshot != Snapshot.CURRENT_STATE_ID) {
      // this inode has been renamed before the deletion of the DstReference
      // subtree
      inode.cleanSubtree(reclaimContext, snapshot, prior);
    } else {
      // for DstReference node, continue this process to its subtree
      destroyDstSubtree(reclaimContext,
          inode.asReference().getReferredINode(), snapshot, prior);
    }
  } else if (inode.isFile()) {
    inode.cleanSubtree(reclaimContext, snapshot, prior);
  } else if (inode.isDirectory()) {
    Map<INode, INode> excludedNodes = null;
    INodeDirectory dir = inode.asDirectory();
    DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
    if (sf != null) {
      DirectoryDiffList diffList = sf.getDiffs();
      DirectoryDiff priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
        excludedNodes = cloneDiffList(dList);
      }
      
      if (snapshot != Snapshot.CURRENT_STATE_ID) {
        diffList.deleteSnapshotDiff(reclaimContext,
            snapshot, prior, dir);
      }
      priorDiff = diffList.getDiffById(prior);
      if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
        priorDiff.diff.destroyCreatedList(reclaimContext, dir);
      }
    }
    for (INode child : inode.asDirectory().getChildrenList(prior)) {
      if (excludedNodes != null && excludedNodes.containsKey(child)) {
        continue;
      }
      destroyDstSubtree(reclaimContext, child, snapshot, prior);
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 49, Source: DirectoryWithSnapshotFeature.java

Example 9: cleanDeletedINode

import org.apache.hadoop.hdfs.server.namenode.INode; // import the dependent package/class
/**
 * Clean an inode while we move it from the deleted list of post to the
 * deleted list of prior.
 * @param reclaimContext blocks and inodes that need to be reclaimed
 * @param inode The inode to clean.
 * @param post The post snapshot.
 * @param prior The id of the prior snapshot.
 */
private static void cleanDeletedINode(INode.ReclaimContext reclaimContext,
    INode inode, final int post, final int prior) {
  Deque<INode> queue = new ArrayDeque<>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        wn.cleanSubtree(reclaimContext, post, prior);
      }
      // For DstReference node, since the node is not in the created list of
      // prior, we should treat it as regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      INodeFile file = topNode.asFile();
      file.getDiffs().deleteSnapshotDiff(reclaimContext, post, prior, file);
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          priorChildrenDiff.destroyCreatedList(reclaimContext, dir);
        }
      }

      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null && priorChildrenDiff.search(
            ListType.DELETED, child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 49, Source: DirectoryWithSnapshotFeature.java

Example 10: cleanDeletedINode

import org.apache.hadoop.hdfs.server.namenode.INode; // import the dependent package/class
/**
 * Clean an inode while we move it from the deleted list of post to the
 * deleted list of prior.
 * @param bsps The block storage policy suite.
 * @param inode The inode to clean.
 * @param post The post snapshot.
 * @param prior The id of the prior snapshot.
 * @param collectedBlocks Used to collect blocks for later deletion.
 * @return Quota usage update.
 */
private static QuotaCounts cleanDeletedINode(
    final BlockStoragePolicySuite bsps, INode inode,
    final int post, final int prior,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  Deque<INode> queue = new ArrayDeque<INode>();
  queue.addLast(inode);
  while (!queue.isEmpty()) {
    INode topNode = queue.pollFirst();
    if (topNode instanceof INodeReference.WithName) {
      INodeReference.WithName wn = (INodeReference.WithName) topNode;
      if (wn.getLastSnapshotId() >= post) {
        wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
      }
      // For DstReference node, since the node is not in the created list of
      // prior, we should treat it as regular file/dir
    } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
      INodeFile file = topNode.asFile();
      counts.add(file.getDiffs().deleteSnapshotDiff(bsps, post, prior, file,
          collectedBlocks, removedINodes));
    } else if (topNode.isDirectory()) {
      INodeDirectory dir = topNode.asDirectory();
      ChildrenDiff priorChildrenDiff = null;
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        // delete files/dirs created after prior. Note that these
        // files/dirs, along with inode, were deleted right after post.
        DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          priorChildrenDiff = priorDiff.getChildrenDiff();
          counts.add(priorChildrenDiff.destroyCreatedList(bsps, dir,
              collectedBlocks, removedINodes));
        }
      }
      
      for (INode child : dir.getChildrenList(prior)) {
        if (priorChildrenDiff != null
            && priorChildrenDiff.search(ListType.DELETED,
                child.getLocalNameBytes()) != null) {
          continue;
        }
        queue.addLast(child);
      }
    }
  }
  return counts;
}
 
Developer: yncxcw, Project: big-c, Lines: 59, Source: DirectoryWithSnapshotFeature.java


Note: the org.apache.hadoop.hdfs.server.namenode.INode.asDirectory examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.