当前位置: 首页>>代码示例>>Java>>正文


Java DirectoryDiff类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff的典型用法代码示例。如果您正苦于以下问题:Java DirectoryDiff类的具体用法?Java DirectoryDiff怎么用?Java DirectoryDiff使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


DirectoryDiff类属于org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot包,在下文中一共展示了DirectoryDiff类的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: loadCreated

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Load a node stored in the created list from fsimage.
 * @param createdNodeName The name of the created node.
 * @param parent The directory that the created list belongs to.
 * @return The created node.
 * @throws IOException if no INode with the given name can be resolved.
 */
private static INode loadCreated(byte[] createdNodeName,
    INodeDirectoryWithSnapshot parent) throws IOException {
  // A created-list entry must resolve to an INode recorded in the DELETED
  // list of some posterior snapshot diff, or to one of the directory's
  // current children.
  for (DirectoryDiff laterDiff : parent.getDiffs()) {
    final INode deleted = laterDiff.getChildrenDiff()
        .search(ListType.DELETED, createdNodeName);
    if (deleted != null) {
      return deleted;
    }
    // not in this diff's deleted list; keep scanning later diffs
  }
  // fall back to the directory's current child of the same name
  final INode child = parent.getChild(createdNodeName, null);
  if (child != null) {
    return child;
  }
  throw new IOException("Cannot find an INode associated with the INode "
      + DFSUtil.bytes2String(createdNodeName)
      + " in created list while loading FSImage.");
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:27,代码来源:SnapshotFSImageFormat.java

示例2: loadDirectoryDiff

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Load {@link DirectoryDiff} from fsimage.
 * @param parent The directory that the SnapshotDiff belongs to.
 * @param in The {@link DataInput} instance to read.
 * @param loader The {@link Loader} instance that this loading procedure is
 *               using.
 * @return A {@link DirectoryDiff}.
 * @throws IOException on a read error from {@code in}.
 */
private static DirectoryDiff loadDirectoryDiff(
    INodeDirectoryWithSnapshot parent, DataInput in,
    FSImageFormat.Loader loader) throws IOException {
  // The on-disk order is fixed; each step below consumes the next field.
  // 1. the full path of the snapshot root identifies the Snapshot
  final Snapshot snapshot = loader.getSnapshot(in);

  // 2. DirectoryDiff#childrenSize
  final int childrenSize = in.readInt();

  // 3. DirectoryDiff#snapshotINode
  final INodeDirectoryAttributes snapshotINode =
      loadSnapshotINodeInDirectoryDiff(snapshot, in, loader);

  // 4. created list of SnapshotDiff#Diff
  final List<INode> created = loadCreatedList(parent, in);

  // 5. deleted list of SnapshotDiff#Diff (resolved against the created list)
  final List<INode> deleted = loadDeletedList(parent, created, in, loader);

  // 6. compose; the posterior diff is the head of the parent's diff list,
  // or null when this is the first diff loaded
  final List<DirectoryDiff> diffs = parent.getDiffs().asList();
  final DirectoryDiff posterior = diffs.isEmpty() ? null : diffs.get(0);
  return new DirectoryDiff(snapshot, snapshotINode, posterior,
      childrenSize, created, deleted);
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:35,代码来源:SnapshotFSImageFormat.java

示例3: checkSnapshotList

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Check the correctness of snapshot list within
 * {@link INodeDirectorySnapshottable}.
 * @param srcRoot the snapshottable directory under test
 * @param sortedNames expected snapshot names in name order
 * @param names expected snapshot names in creation-time order
 */
private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,
    String[] sortedNames, String[] names) {
  // the by-name listing must match the lexicographically sorted names
  final ReadOnlyList<Snapshot> byName = srcRoot.getSnapshotsByNames();
  assertEquals(sortedNames.length, byName.size());
  for (int i = 0; i < byName.size(); i++) {
    final Snapshot s = byName.get(i);
    assertEquals(sortedNames[i], s.getRoot().getLocalName());
  }
  // the diff list is ordered by snapshot creation time
  final List<DirectoryDiff> byTime = srcRoot.getDiffs().asList();
  assertEquals(names.length, byTime.size());
  for (int i = 0; i < byTime.size(); i++) {
    final Snapshot s = byTime.get(i).getSnapshot();
    assertEquals(names[i], s.getRoot().getLocalName());
  }
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:18,代码来源:TestSnapshotRename.java

示例4: testSnapshotOnRoot

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Test when there is snapshot taken on root.
 * Verifies that a snapshot on "/" survives a plain restart (recovered from
 * the existing storage directories) and a saveNamespace + restart (recovered
 * from a freshly written fsimage).
 */
@Test
public void testSnapshotOnRoot() throws Exception {
  final Path root = new Path("/");
  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");
  
  // restart without reformatting so the snapshot state must be recovered
  // from the existing storage (edit log / previous image)
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  
  // after two restarts, root should still be snapshottable
  INodeDirectorySnapshottable rootNode = 
      (INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
  assertTrue("The children list of root should be empty", 
      rootNode.getChildrenList(null).isEmpty());
  // one snapshot on root: s1
  List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  assertEquals("s1", diffList.get(0).getSnapshot().getRoot().getLocalName());
  
  // check SnapshotManager's snapshottable directory list
  assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
  SnapshottableDirectoryStatus[] sdirs = fsn.getSnapshotManager()
      .getSnapshotManager() == null ? null : fsn.getSnapshotManager()
      .getSnapshottableDirListing(null);
  assertEquals(root, sdirs[0].getFullPath());
  
  // save namespace and restart cluster
  // (no assertions afterwards: this final cycle only verifies that the
  // image saved above loads without error)
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:54,代码来源:TestFSImageWithSnapshot.java

示例5: testRenameUndo_1

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir before taking the snapshot.
 * A mocked destination directory forces addChild to fail, so the rename must
 * be rolled back; the test then checks that the source tree and its snapshot
 * diffs are exactly as they were before the rename attempt.
 */
@Test
public void testRenameUndo_1() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  
  // replace dir2 with a spy whose addChild always fails, so the rename's
  // "add to destination" step fails and triggers the undo path
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
          (Snapshot) anyObject(), (INodeMap) anyObject());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
  
  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);
  
  // check the current internal details
  INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
      .getINode4Write(sdir1.toString());
  ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());
  
  // after the undo of rename, both the created and deleted list of sdir1
  // should be empty
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
  
  // foo was converted to a with-snapshot directory during the rename
  // attempt and stays that way after the undo
  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
  List<DirectoryDiff> fooDiffs = ((INodeDirectoryWithSnapshot) fooNode)
      .getDiffs().asList();
  assertEquals(1, fooDiffs.size());
  assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName());
  
  // the snapshot-path view of foo must still resolve to the same INode
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
  assertTrue(fooNode_s1 == fooNode);
  
  // check sdir2: unchanged by the failed rename, and never converted to a
  // with-snapshot directory
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
  ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:66,代码来源:TestRenameWithSnapshots.java

示例6: testRenameUndo_2

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Test the undo section of rename. Before the rename, we create the renamed 
 * file/dir after taking the snapshot.
 * Because foo was created after s1, the undo must restore it to sdir1's
 * created list (unlike testRenameUndo_1, where both lists end up empty).
 */
@Test
public void testRenameUndo_2() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path dir2file = new Path(sdir2, "file");
  DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  
  // create foo after taking snapshot
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  // replace dir2 with a spy whose addChild always fails, forcing the
  // rename to take the undo path
  INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
  INodeDirectory mockDir2 = spy(dir2);
  doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
          (Snapshot) anyObject(), (INodeMap) anyObject());
  INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
  root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
  
  final Path newfoo = new Path(sdir2, "foo");
  boolean result = hdfs.rename(foo, newfoo);
  assertFalse(result);
  
  // check the current internal details
  INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
      .getINode4Write(sdir1.toString());
  ReadOnlyList<INode> dir1Children = dir1Node.getChildrenList(null);
  assertEquals(1, dir1Children.size());
  assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
  List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
  assertEquals(1, dir1Diffs.size());
  assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName());
  
  // after the undo of rename, the created list of sdir1 should contain 
  // 1 element
  ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
  assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
  assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
  
  // the created-list entry must be the live foo INode itself
  INode fooNode = fsdir.getINode4Write(foo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
  assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
  
  // foo did not exist at snapshot time, so it must not be visible via s1
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
  assertFalse(hdfs.exists(foo_s1));
  
  // check sdir2: untouched by the failed rename
  assertFalse(hdfs.exists(newfoo));
  INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
      .asDirectory();
  assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot);
  ReadOnlyList<INode> dir2Children = dir2Node.getChildrenList(null);
  assertEquals(1, dir2Children.size());
  assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:64,代码来源:TestRenameWithSnapshots.java

示例7: testRenameUndo_5

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 * The quota check at FSDirectory#verifyQuota4Rename passes, but the extra
 * snapshot-diff record created while adding foo to subdir2 pushes dir2 over
 * its namespace quota, so the rename fails mid-operation and must be undone.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path subdir2 = new Path(dir2, "subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);
  
  final Path foo = new Path(dir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // set ns quota of dir2 to 5, so the current remaining is 2 (already has
  // dir2, subdir2, and s2)
  hdfs.setQuota(dir2, 5, Long.MAX_VALUE - 1);
  
  final Path foo2 = new Path(subdir2, foo.getName());
  // rename /test/dir1/foo to /test/dir2/subdir2/foo. 
  // FSDirectory#verifyQuota4Rename will pass since foo/bar only be counted 
  // as 2 in NS quota. However, the rename operation will fail when adding
  // foo to subdir2, since we will create a snapshot diff for subdir2. 
  boolean rename = hdfs.rename(foo, foo2);
  assertFalse(rename);
  
  // check the undo: foo/bar must be fully restored under dir1
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));
  INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(null));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  // foo was converted to a with-snapshot directory during the rename attempt
  assertTrue(fooNode.getClass() == INodeDirectoryWithSnapshot.class);
  INode barNode = fsdir.getINode4Write(bar.toString());
  assertTrue(barNode.getClass() == INodeFile.class);
  assertSame(fooNode, barNode.getParent());
  // dir1's single diff (for s1) must be left with empty created/deleted
  // lists after the undo
  List<DirectoryDiff> diffList = ((INodeDirectorySnapshottable) dir1Node)
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  
  // check dir2: quota usage back to 3 (dir2, subdir2, s2) and its s2 diff
  // left clean
  INode dir2Node = fsdir.getINode4Write(dir2.toString());
  assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
  Quota.Counts counts = dir2Node.computeQuotaUsage();
  assertEquals(3, counts.get(Quota.NAMESPACE));
  assertEquals(0, counts.get(Quota.DISKSPACE));
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(null));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir.getINode4Write(subdir2.toString()));
  diffList = ((INodeDirectorySnapshottable) dir2Node)
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:72,代码来源:TestRenameWithSnapshots.java

示例8: testRenameUndo_6

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Test the rename undo when removing dst node fails.
 * An OVERWRITE rename must remove the existing destination; here that
 * removal needs a snapshot diff in sub_dir2, which exceeds dir2's namespace
 * quota, so the whole rename fails and must be rolled back on both sides.
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subsub_dir2 = new Path(sub_dir2, "subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);
  
  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // set ns quota of dir2 to 4, so the current remaining is 0 (already has
  // dir2, sub_dir2, subsub_dir2, and s2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subsub_dir2. 
  // FSDirectory#verifyQuota4Rename will pass since foo only be counted 
  // as 1 in NS quota. However, the rename operation will fail when removing
  // subsub_dir2 since this step tries to add a snapshot diff in sub_dir2.
  try {
    hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
    fail("Expect QuotaExceedException");
  } catch (QuotaExceededException e) {
    String msg = "Failed to record modification for snapshot: "
        + "The NameSpace quota (directories and files)"
        + " is exceeded: quota=4 file count=5"; 
    GenericTestUtils.assertExceptionContains(msg, e);
  }
  
  // check the undo: foo restored under dir1, dir1's s1 diff left clean
  assertTrue(hdfs.exists(foo));
  INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(null));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  // foo was converted to a with-snapshot directory during the rename attempt
  assertTrue(fooNode.getClass() == INodeDirectoryWithSnapshot.class);
  assertSame(dir1Node, fooNode.getParent());
  List<DirectoryDiff> diffList = ((INodeDirectorySnapshottable) dir1Node)
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  
  // check dir2: quota usage back at 4 and the destination tree intact
  INode dir2Node = fsdir.getINode4Write(dir2.toString());
  assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
  Quota.Counts counts = dir2Node.computeQuotaUsage();
  assertEquals(4, counts.get(Quota.NAMESPACE));
  assertEquals(0, counts.get(Quota.DISKSPACE));
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(null));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  // sub_dir2 was converted to a with-snapshot directory by the failed
  // removal attempt; subsub_dir2 stays a plain directory
  assertTrue(subdir2Node.getClass() == INodeDirectoryWithSnapshot.class);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir.getINode4Write(sub_dir2.toString()));
  INode subsubdir2Node = fsdir.getINode4Write(subsub_dir2.toString());
  assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
  assertSame(subdir2Node, subsubdir2Node.getParent());
  
  diffList = ((INodeDirectorySnapshottable) dir2Node).getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  
  // the partially-added diff on sub_dir2 must have been removed by the undo
  diffList = ((INodeDirectoryWithSnapshot) subdir2Node).getDiffs().asList();
  assertEquals(0, diffList.size());
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:81,代码来源:TestRenameWithSnapshots.java

示例9: testRenameUndo_7

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Test rename to an invalid name (xxx/.snapshot).
 * The rename is rejected because ".snapshot" is a reserved name; the test
 * verifies the undo leaves empty diffs behind, then restarts the cluster to
 * confirm the fsimage and edit log round-trip cleanly.
 */
@Test
public void testRenameUndo_7() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  // create a snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
  
  // rename bar to /foo/.snapshot which is invalid
  final Path invalid = new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR);
  try {
    hdfs.rename(bar, invalid);
    fail("expect exception since invalid name is used for rename");
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains("\"" +
        HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name", e);
  }
  
  // check: foo was converted to a with-snapshot directory while the rename
  // was being attempted
  INodeDirectoryWithSnapshot fooNode = (INodeDirectoryWithSnapshot) fsdir
      .getINode4Write(foo.toString());
  ReadOnlyList<INode> children = fooNode.getChildrenList(null);
  assertEquals(1, children.size());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  // this diff is generated while renaming
  assertEquals(snap1, Snapshot.getSnapshotName(diff.snapshot));
  // after undo, the diff should be empty
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  
  // bar was converted to filewithsnapshot while renaming
  INodeFileWithSnapshot barNode = (INodeFileWithSnapshot) fsdir
      .getINode4Write(bar.toString());
  assertSame(barNode, children.get(0));
  assertSame(fooNode, barNode.getParent());
  List<FileDiff> barDiffList = barNode.getDiffs().asList();
  assertEquals(1, barDiffList.size());
  FileDiff barDiff = barDiffList.get(0);
  assertEquals(snap1, Snapshot.getSnapshotName(barDiff.snapshot));
  
  // restart cluster multiple times to make sure the fsimage and edits log are
  // correct. Note that when loading fsimage, foo and bar will be converted 
  // back to normal INodeDirectory and INodeFile since they do not store any 
  // snapshot data
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPL).build();
  cluster.waitActive();
  restartClusterAndCheckImage(true);
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:61,代码来源:TestRenameWithSnapshots.java

示例10: testRenameDirAndDeleteSnapshot_3

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * 
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);
  
  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
  
  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");
  
  // delete foo2
  hdfs.delete(foo2, true);
  // delete s3
  hdfs.deleteSnapshot(sdir2, "s3");
  
  // check namespace usage: bar2/bar3 (created after the rename) must be
  // fully destroyed, not retained by any snapshot
  final INodeDirectorySnapshottable dir1Node = 
      (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
  assertEquals(4, dir1Node.getNamespace());
  final INodeDirectorySnapshottable dir2Node = 
      (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
  assertEquals(2, dir2Node.getNamespace());
  
  // foo is still reachable through s1 as a WithName reference with a single
  // remaining referent
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(1, wc.getReferenceCount());
  INodeDirectoryWithSnapshot fooNode = 
      (INodeDirectoryWithSnapshot) wc.getReferredINode().asDirectory();
  // only the original child bar survives; bar2/bar3 are gone
  ReadOnlyList<INode> children = fooNode.getChildrenList(null);
  assertEquals(1, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  assertEquals("s1", Snapshot.getSnapshotName(diffList.get(0).snapshot));
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  assertEquals(0, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());
  
  // verify the state survives an fsimage save/load cycle
  restartClusterAndCheckImage(true);
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:67,代码来源:TestRenameWithSnapshots.java

示例11: testRenameDirAndDeleteSnapshot_4

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * 
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  hdfs.mkdirs(sdir2);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  
  final Path foo2 = new Path(sdir2, "foo");
  hdfs.rename(foo, foo2);
  
  // create two new files under foo2
  final Path bar2 = new Path(foo2, "bar2");
  DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
  final Path bar3 = new Path(foo2, "bar3");
  DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
  
  // create a new snapshot on sdir2
  hdfs.createSnapshot(sdir2, "s3");
  
  // rename foo2 again
  hdfs.rename(foo2, foo);
  // delete snapshot s3
  hdfs.deleteSnapshot(sdir2, "s3");
  
  // check namespace usage: deleting s3 must not destroy foo's live subtree,
  // only the s3 record under it
  final INodeDirectorySnapshottable dir1Node = 
      (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
  // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
  assertEquals(9, dir1Node.getNamespace());
  final INodeDirectorySnapshottable dir2Node = 
      (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
  assertEquals(2, dir2Node.getNamespace());
  
  // foo is referenced both via s1 (WithName) and via its live path
  // (DstReference); both share one WithCount
  final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
      foo.getName());
  final INode fooRef = fsdir.getINode(foo_s1.toString());
  assertTrue(fooRef instanceof INodeReference.WithName);
  INodeReference.WithCount wc = 
      (WithCount) fooRef.asReference().getReferredINode();
  assertEquals(2, wc.getReferenceCount());
  INodeDirectoryWithSnapshot fooNode = 
      (INodeDirectoryWithSnapshot) wc.getReferredINode().asDirectory();
  // all three children survive since they were not deleted
  ReadOnlyList<INode> children = fooNode.getChildrenList(null);
  assertEquals(3, children.size());
  assertEquals(bar.getName(), children.get(0).getLocalName());
  assertEquals(bar2.getName(), children.get(1).getLocalName());
  assertEquals(bar3.getName(), children.get(2).getLocalName());
  List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
  assertEquals(1, diffList.size());
  assertEquals("s1", Snapshot.getSnapshotName(diffList.get(0).snapshot));
  ChildrenDiff diff = diffList.get(0).getChildrenDiff();
  // bar2 and bar3 in the created list
  assertEquals(2, diff.getList(ListType.CREATED).size());
  assertEquals(0, diff.getList(ListType.DELETED).size());
  
  final INode fooRef2 = fsdir.getINode4Write(foo.toString());
  assertTrue(fooRef2 instanceof INodeReference.DstReference);
  INodeReference.WithCount wc2 = 
      (WithCount) fooRef2.asReference().getReferredINode();
  assertSame(wc, wc2);
  assertSame(fooRef2, wc.getParentReference());
  
  // verify the state survives an fsimage save/load cycle
  restartClusterAndCheckImage(true);
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:77,代码来源:TestRenameWithSnapshots.java

示例12: testClearQuota

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 * Setting or clearing quotas must never create snapshot diffs or revoke the
 * snapshottable status of a directory.
 */
@Test
public void testClearQuota() throws Exception {
  final Path dir = new Path("/TestSnapshot");
  hdfs.mkdirs(dir);
  
  // quota changes on a snapshottable dir with no snapshots must not create
  // any diff
  hdfs.allowSnapshot(dir);
  hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
      HdfsConstants.QUOTA_DONT_SET);
  INode dirNode = fsdir.getINode4Write(dir.toString());
  assertTrue(dirNode instanceof INodeDirectorySnapshottable);
  assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
      .size());
  
  hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1,
      HdfsConstants.QUOTA_DONT_SET - 1);
  dirNode = fsdir.getINode4Write(dir.toString());
  assertTrue(dirNode instanceof INodeDirectorySnapshottable);
  assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
      .size());
  
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  dirNode = fsdir.getINode4Write(dir.toString());
  assertTrue(dirNode instanceof INodeDirectorySnapshottable);
  assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
      .size());
  
  // allow snapshot on dir and create snapshot s1
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  
  // clear quota of dir
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  // dir should still be a snapshottable directory
  dirNode = fsdir.getINode4Write(dir.toString());
  assertTrue(dirNode instanceof INodeDirectorySnapshottable);
  assertEquals(1, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
      .size());
  SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
  assertEquals(1, status.length);
  assertEquals(dir, status[0].getFullPath());
  
  // a subdir modified after snapshot s2: clearing the quota must still
  // record the snapshot diff (file in the created list), not wipe it
  final Path subDir = new Path(dir, "sub");
  hdfs.mkdirs(subDir);
  hdfs.createSnapshot(dir, "s2");
  final Path file = new Path(subDir, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
  INode subNode = fsdir.getINode4Write(subDir.toString());
  assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
  List<DirectoryDiff> diffList = ((INodeDirectoryWithSnapshot) subNode).getDiffs().asList();
  assertEquals(1, diffList.size());
  assertEquals("s2", Snapshot.getSnapshotName(diffList.get(0).snapshot));
  List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
  assertEquals(1, createdList.size());
  assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:59,代码来源:TestSetQuotaWithSnapshot.java

示例13: testSnapshotWhileAppending

import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; //导入依赖的package包/类
/**
 * Test snapshot during file appending, before the corresponding
 * {@link FSDataOutputStream} instance closes.
 * Each snapshot must pin the file length as it was at snapshot time, even
 * though the file is still under construction when the snapshot is taken.
 */
@Test (timeout=60000)
public void testSnapshotWhileAppending() throws Exception {
  Path file = new Path(dir, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  
  // 1. append without closing stream --> create snapshot
  HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
  // hsync with UPDATE_LENGTH so the NN sees the appended length before the
  // snapshot is taken
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
  out.close();
  
  // check: an INodeFileUnderConstructionWithSnapshot should be stored into s0's
  // deleted list, with size BLOCKSIZE*2
  INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
  assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
  INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
      .getINode(dir.toString());
  DirectoryDiff last = dirNode.getDiffs().getLast();
  Snapshot s0 = last.snapshot;
  
  // 2. append without closing stream
  out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  // re-check nodeInDeleted_S0: the s0 view of the file keeps its length at
  // snapshot time regardless of the ongoing append
  dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
  assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(s0));
  
  // 3. take snapshot --> close stream
  hdfs.createSnapshot(dir, "s1");
  out.close();
  
  // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
  // have been stored in s1's deleted list
  fileNode = (INodeFile) fsdir.getINode(file.toString());
  dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
  last = dirNode.getDiffs().getLast();
  Snapshot s1 = last.snapshot;
  assertTrue(fileNode instanceof INodeFileWithSnapshot);
  assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
  
  // 4. modify file --> append without closing stream --> take snapshot -->
  // close stream
  hdfs.setReplication(file, (short) (REPLICATION - 1));
  out = appendFileWithoutClosing(file, BLOCKSIZE);
  hdfs.createSnapshot(dir, "s2");
  out.close();
  
  // re-check the size of nodeInDeleted_S1: the close after s2 must not
  // disturb the length recorded for s1
  assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
}
 
开发者ID:chendave,项目名称:hadoop-TCP,代码行数:56,代码来源:TestINodeFileUnderConstructionWithSnapshot.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。